Commit cb098dd2 authored by Amir Goldstein, committed by Miklos Szeredi

fuse: introduce inode io modes

The fuse inode io mode is determined by the mode of its open files/mmaps
and by parallel dio opens, and is expressed in the value of fi->iocachectr:

 > 0 - caching io: files open in caching mode or mmap on direct_io file
 < 0 - parallel dio: direct io mode with parallel dio writes enabled
== 0 - direct io: no files open in caching mode and no files mmapped

Note that an iocachectr value of 0 may turn positive or negative while
non-parallel dio is in flight.
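For illustration only, the three states map onto fi->iocachectr as in the
hypothetical helper below; it is not part of this patch, which tests the
counter directly under fi->lock:

/* Illustrative sketch, not in this patch: classify the inode io mode
 * from fi->iocachectr. The patched code tests the counter directly
 * while holding fi->lock. */
enum fuse_inode_iomode {
        FUSE_IOMODE_DIRECT,       /* == 0: no caching opens, no mmaps */
        FUSE_IOMODE_CACHED,       /*  > 0: caching opens or direct_io mmaps */
        FUSE_IOMODE_PARALLEL_DIO, /*  < 0: parallel dio writes enabled */
};

static enum fuse_inode_iomode fuse_inode_iomode(struct fuse_inode *fi)
{
        if (fi->iocachectr > 0)
                return FUSE_IOMODE_CACHED;
        if (fi->iocachectr < 0)
                return FUSE_IOMODE_PARALLEL_DIO;
        return FUSE_IOMODE_DIRECT;
}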

direct_io mmap uses the page cache, so the first mmap of a direct_io
file puts the fuse_file into IOM_CACHED state and increments
fi->iocachectr, entering the caching io mode.

If the server opens the file in caching mode while it is already open
for parallel dio, or vice versa, the open fails.

This allows executing parallel dio when the inode is not in caching mode
and no mmaps have been performed on the inode in question.
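A sketch of how a dio write path can take advantage of this, using a
hypothetical helper (the actual locking decision lives in file.c and is
wired up by other patches in this series):

/* Hypothetical sketch, not part of this patch: a direct write may take
 * the inode lock shared (allowing parallel dio writes) only if the
 * server enabled FOPEN_PARALLEL_DIRECT_WRITES and the inode is not in
 * caching io mode. FUSE_I_CACHE_IO_MODE is the inode state bit this
 * patch sets while fi->iocachectr > 0. */
static bool fuse_dio_write_may_lock_shared(struct file *file)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_inode *fi = get_fuse_inode(file_inode(file));

        return (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) &&
               !test_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
}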
Signed-off-by: Bernd Schubert <bschubert@ddn.com>
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent d2c487f1
fs/fuse/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CUSE) += cuse.o
obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
fuse-y += iomode.o
fuse-$(CONFIG_FUSE_DAX) += dax.o
virtiofs-y := virtio_fs.o
fs/fuse/file.c
@@ -113,6 +113,9 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
struct fuse_release_args *ra = ff->release_args;
struct fuse_args *args = (ra ? &ra->args : NULL);
if (ra && ra->inode)
fuse_file_io_release(ff, ra->inode);
if (!args) {
/* Do nothing when server does not implement 'open' */
} else if (sync) {
@@ -204,6 +207,11 @@ int fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
err = fuse_file_io_open(file, inode);
if (err)
return err;
if (ff->open_flags & FOPEN_STREAM)
stream_open(inode, file);
@@ -2509,6 +2517,7 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fm->fc;
int rc;
/* DAX mmap is superior to direct_io mmap */
if (FUSE_IS_DAX(file_inode(file)))
@@ -2528,6 +2537,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
/* MAP_PRIVATE */
return generic_file_mmap(file, vma);
}
/* First mmap of direct_io file enters caching inode io mode. */
rc = fuse_file_cached_io_start(file_inode(file), ff);
if (rc)
return rc;
}
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
@@ -3296,6 +3310,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags)
INIT_LIST_HEAD(&fi->write_files);
INIT_LIST_HEAD(&fi->queued_writes);
fi->writectr = 0;
fi->iocachectr = 0;
init_waitqueue_head(&fi->page_waitq);
fi->writepages = RB_ROOT;
fs/fuse/fuse_i.h
@@ -111,7 +111,7 @@ struct fuse_inode {
u64 attr_version;
union {
-	/* Write related fields (regular file only) */
+	/* read/write io cache (regular file only) */
struct {
/* Files usable in writepage. Protected by fi->lock */
struct list_head write_files;
@@ -123,6 +123,9 @@ struct fuse_inode {
* (FUSE_NOWRITE) means more writes are blocked */
int writectr;
/** Number of files/maps using page cache */
int iocachectr;
/* Waitq for writepage completion */
wait_queue_head_t page_waitq;
@@ -187,6 +190,8 @@ enum {
FUSE_I_BAD,
/* Has btime */
FUSE_I_BTIME,
/* Wants or already has page cache IO */
FUSE_I_CACHE_IO_MODE,
};
struct fuse_conn;
@@ -244,6 +249,9 @@ struct fuse_file {
/** Wait queue head for poll */
wait_queue_head_t poll_wait;
/** Does file hold a fi->iocachectr refcount? */
enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode;
/** Has flock been performed on this file? */
bool flock:1;
};
@@ -1343,8 +1351,13 @@ int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int fuse_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
-/* file.c */
+/* iomode.c */
+int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff);
+int fuse_file_io_open(struct file *file, struct inode *inode);
+void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);
+
+/* file.c */
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
unsigned int open_flags, bool isdir);
void fuse_file_release(struct inode *inode, struct fuse_file *ff,
fs/fuse/iomode.c (new file)
// SPDX-License-Identifier: GPL-2.0
/*
 * FUSE inode io modes.
 *
 * Copyright (c) 2024 CTERA Networks.
 */

#include "fuse_i.h"

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>

/*
 * Start cached io mode, where parallel dio writes are not allowed.
 */
int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
{
        struct fuse_inode *fi = get_fuse_inode(inode);
        int err = 0;

        /* There are no io modes if server does not implement open */
        if (!ff->release_args)
                return 0;

        spin_lock(&fi->lock);
        if (fi->iocachectr < 0) {
                err = -ETXTBSY;
                goto unlock;
        }
        WARN_ON(ff->iomode == IOM_UNCACHED);
        if (ff->iomode == IOM_NONE) {
                ff->iomode = IOM_CACHED;
                if (fi->iocachectr == 0)
                        set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
                fi->iocachectr++;
        }
unlock:
        spin_unlock(&fi->lock);
        return err;
}

static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fi->lock);
        WARN_ON(fi->iocachectr <= 0);
        WARN_ON(ff->iomode != IOM_CACHED);
        ff->iomode = IOM_NONE;
        fi->iocachectr--;
        if (fi->iocachectr == 0)
                clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
        spin_unlock(&fi->lock);
}
/* Start strictly uncached io mode where cache access is not allowed */
static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff)
{
        struct fuse_inode *fi = get_fuse_inode(inode);
        int err = 0;

        spin_lock(&fi->lock);
        if (fi->iocachectr > 0) {
                err = -ETXTBSY;
                goto unlock;
        }
        WARN_ON(ff->iomode != IOM_NONE);
        fi->iocachectr--;
        ff->iomode = IOM_UNCACHED;
unlock:
        spin_unlock(&fi->lock);
        return err;
}

static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fi->lock);
        WARN_ON(fi->iocachectr >= 0);
        WARN_ON(ff->iomode != IOM_UNCACHED);
        ff->iomode = IOM_NONE;
        fi->iocachectr++;
        spin_unlock(&fi->lock);
}
/* Request access to submit new io to inode via open file */
int fuse_file_io_open(struct file *file, struct inode *inode)
{
        struct fuse_file *ff = file->private_data;
        int err;

        /*
         * io modes are not relevant with DAX and with server that does not
         * implement open.
         */
        if (FUSE_IS_DAX(inode) || !ff->release_args)
                return 0;

        /*
         * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
         */
        if (!(ff->open_flags & FOPEN_DIRECT_IO))
                ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;

        /*
         * First parallel dio open denies caching inode io mode.
         * First caching file open enters caching inode io mode.
         *
         * Note that if the user opens a file with O_DIRECT, but the server
         * did not specify FOPEN_DIRECT_IO, a later fcntl() could remove
         * O_DIRECT, so we put the inode in caching mode to prevent
         * parallel dio.
         */
        if (ff->open_flags & FOPEN_DIRECT_IO) {
                if (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)
                        err = fuse_file_uncached_io_start(inode, ff);
                else
                        return 0;
        } else {
                err = fuse_file_cached_io_start(inode, ff);
        }
        if (err)
                goto fail;

        return 0;

fail:
        pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
                 ff->open_flags, err);
        /*
         * The file open mode determines the inode io mode.
         * Using incorrect open mode is a server mistake, which results in
         * user visible failure of open() with EIO error.
         */
        return -EIO;
}
/* No more pending io and no new io possible to inode via open/mmapped file */
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
{
        /*
         * Last parallel dio close allows caching inode io mode.
         * Last caching file close exits caching inode io mode.
         */
        switch (ff->iomode) {
        case IOM_NONE:
                /* Nothing to do */
                break;
        case IOM_UNCACHED:
                fuse_file_uncached_io_end(inode, ff);
                break;
        case IOM_CACHED:
                fuse_file_cached_io_end(inode, ff);
                break;
        }
}
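From the server side, the inode io mode is driven entirely by the FOPEN_*
flags returned from FUSE_OPEN. As a hedged illustration, a libfuse
high-level open handler requesting parallel dio writes might look like the
sketch below (assumes a libfuse release that exposes the
parallel_direct_writes field, i.e. libfuse >= 3.15); with this patch,
mixing such opens with caching opens of the same inode makes the later
open fail with EIO:

#define FUSE_USE_VERSION 35
#include <fuse.h>

/* Example open handler: ask the kernel for direct io with parallel
 * dio writes. parallel_direct_writes maps to
 * FOPEN_PARALLEL_DIRECT_WRITES and is silently ignored by kernels
 * that do not support it. */
static int example_open(const char *path, struct fuse_file_info *fi)
{
        fi->direct_io = 1;              /* FOPEN_DIRECT_IO */
        fi->parallel_direct_writes = 1; /* FOPEN_PARALLEL_DIRECT_WRITES */
        return 0;
}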