Commit 9bbb6717 authored by Bernd Schubert, committed by Miklos Szeredi

fuse: add fuse_dio_lock/unlock helper functions

So far these are just helpers that move the complex locking logic out of
fuse_direct_write_iter.  They are especially needed by the next patch in the
series, which adds the fuse inode cache IO mode and brings in even more
locking complexity.
Signed-off-by: Bernd Schubert <bschubert@ddn.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent 699cf824
@@ -1340,6 +1340,37 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
 	return false;
 }
 
+static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
+			  bool *exclusive)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+
+	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
+	if (*exclusive) {
+		inode_lock(inode);
+	} else {
+		inode_lock_shared(inode);
+		/*
+		 * Previous check was without inode lock and might have raced,
+		 * check again.
+		 */
+		if (fuse_io_past_eof(iocb, from)) {
+			inode_unlock_shared(inode);
+			inode_lock(inode);
+			*exclusive = true;
+		}
+	}
+}
+
+static void fuse_dio_unlock(struct inode *inode, bool exclusive)
+{
+	if (exclusive) {
+		inode_unlock(inode);
+	} else {
+		inode_unlock_shared(inode);
+	}
+}
+
 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
@@ -1604,30 +1635,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(iocb->ki_filp);
 	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
 	ssize_t res;
-	bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from);
+	bool exclusive;
 
-	/*
-	 * Take exclusive lock if
-	 * - Parallel direct writes are disabled - a user space decision
-	 * - Parallel direct writes are enabled and i_size is being extended.
-	 * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
-	 *   This might not be needed at all, but needs further investigation.
-	 */
-	if (exclusive_lock)
-		inode_lock(inode);
-	else {
-		inode_lock_shared(inode);
-
-		/*
-		 * Previous check was without any lock and might have raced.
-		 */
-		if (fuse_io_past_eof(iocb, from)) {
-			inode_unlock_shared(inode);
-			inode_lock(inode);
-			exclusive_lock = true;
-		}
-	}
-
+	fuse_dio_lock(iocb, from, &exclusive);
 	res = generic_write_checks(iocb, from);
 	if (res > 0) {
 		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
@@ -1638,10 +1648,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			fuse_write_update_attr(inode, iocb->ki_pos, res);
 		}
 	}
-	if (exclusive_lock)
-		inode_unlock(inode);
-	else
-		inode_unlock_shared(inode);
+	fuse_dio_unlock(inode, exclusive);
 
 	return res;
 }
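For reference, the pattern the two helpers establish is a simple lock/do-IO/unlock bracket around the write path.  Below is a minimal sketch of a caller, based only on the hunks above; fuse_dio_write_example and fuse_send_dio_write are hypothetical placeholder names, not functions from this commit:

static ssize_t fuse_dio_write_example(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	bool exclusive;
	ssize_t res;

	/* Pick shared vs. exclusive i_rwsem and take it (upgrades if the EOF check raced). */
	fuse_dio_lock(iocb, from, &exclusive);

	res = generic_write_checks(iocb, from);
	if (res > 0)
		res = fuse_send_dio_write(iocb, from);	/* placeholder for the actual DIO path */

	/* Release whichever lock mode fuse_dio_lock() chose. */
	fuse_dio_unlock(inode, exclusive);

	return res;
}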