Commit 5fb70e72 authored by David Howells

netfs, 9p: Implement helpers for new write code

Implement the helpers for the new write code in 9p.  There's now an
optional ->prepare_write() that allows the filesystem to set the parameters
for the next write, such as maximum size and maximum segment count, and an
->issue_write() that is called to initiate an (asynchronous) write
operation.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: v9fs@lists.linux.dev
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
parent ed22e1db
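
For orientation, here is a minimal sketch, not part of this commit, of how a network filesystem registers the new write hooks that the 9p diff below supplies, ->begin_writeback() and ->issue_write(). The myfs_* names and the example wsize value are invented for illustration; only the netfs_request_ops field names and the netfs_write_subrequest_terminated() call shape are taken from the patch.

#include <linux/netfs.h>

/* Sketch only: begin_writeback() sets per-request write parameters and marks
 * the upload stream usable (as the 9p hook below also does); issue_write()
 * starts one (possibly asynchronous) write and reports its outcome with
 * netfs_write_subrequest_terminated().
 */
static void myfs_begin_writeback(struct netfs_io_request *wreq)
{
	wreq->wsize = 65536;			/* hypothetical server I/O limit */
	wreq->io_streams[0].avail = true;	/* allow the upload stream */
}

static void myfs_issue_write(struct netfs_io_subrequest *subreq)
{
	ssize_t ret = -EIO;	/* would be the transport's result */

	/* ... transmit subreq->len bytes from subreq->io_iter at subreq->start ... */
	netfs_write_subrequest_terminated(subreq, ret, false);
}

static const struct netfs_request_ops myfs_req_ops = {
	.begin_writeback	= myfs_begin_writeback,
	.issue_write		= myfs_issue_write,
};
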
fs/9p/vfs_addr.c
@@ -26,6 +26,40 @@
#include "cache.h"
#include "fid.h"

/*
 * Writeback calls this when it finds a folio that needs uploading.  This
 * isn't called if writeback only has copy-to-cache to deal with.
 */
static void v9fs_begin_writeback(struct netfs_io_request *wreq)
{
	struct p9_fid *fid;

	fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
	if (!fid) {
		WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
			  wreq->inode->i_ino);
		return;
	}

	wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
	if (fid->iounit)
		wreq->wsize = min(wreq->wsize, fid->iounit);
	wreq->netfs_priv = fid;
	wreq->io_streams[0].avail = true;
}

/*
 * Issue a subrequest to write to the server.
 */
static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct p9_fid *fid = subreq->rreq->netfs_priv;
	int err, len;

	len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
	netfs_write_subrequest_terminated(subreq, len ?: err, false);
}

static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
{
	struct p9_fid *fid = subreq->rreq->netfs_priv;
@@ -92,6 +126,14 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
			rreq->origin == NETFS_UNBUFFERED_WRITE ||
			rreq->origin == NETFS_DIO_WRITE);

#if 0 // TODO: Cut over
	if (rreq->origin == NETFS_WRITEBACK)
		return 0; /* We don't get the write handle until we find we
			   * have actually dirty data and not just
			   * copy-to-cache data.
			   */
#endif

	if (file) {
		fid = file->private_data;
		if (!fid)
@@ -103,6 +145,10 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
			goto no_fid;
	}

	rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
	if (fid->iounit)
		rreq->wsize = min(rreq->wsize, fid->iounit);

	/* we might need to read from a fid that was opened write-only
	 * for read-modify-write of page cache, use the writeback fid
	 * for that */
@@ -131,6 +177,8 @@ const struct netfs_request_ops v9fs_req_ops = {
	.init_request = v9fs_init_request,
	.free_request = v9fs_free_request,
	.issue_read = v9fs_issue_read,
	.begin_writeback = v9fs_begin_writeback,
	.issue_write = v9fs_issue_write,
	.create_write_requests = v9fs_create_write_requests,
};
include/net/9p/client.h
@@ -207,6 +207,8 @@ int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err
int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
			int *err);
int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
struct netfs_io_subrequest;
void p9_client_write_subreq(struct netfs_io_subrequest *subreq);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
		  struct p9_dirent *dirent);
net/9p/Kconfig
@@ -5,6 +5,7 @@
menuconfig NET_9P
	tristate "Plan 9 Resource Sharing Support (9P2000)"
	select NETFS_SUPPORT
	help
	  If you say Y here, you will get experimental support for
	  Plan 9 resource sharing via the 9P2000 protocol.
net/9p/client.c
@@ -18,6 +18,7 @@
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
@@ -1661,6 +1662,54 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
}
EXPORT_SYMBOL(p9_client_write);

void
p9_client_write_subreq(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *wreq = subreq->rreq;
	struct p9_fid *fid = wreq->netfs_priv;
	struct p9_client *clnt = fid->clnt;
	struct p9_req_t *req;
	unsigned long long start = subreq->start + subreq->transferred;
	int written, len = subreq->len - subreq->transferred;
	int err;

	p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu len %d\n",
		 fid->fid, start, len);

	/* Don't bother zerocopy for small IO (< 1024) */
	if (clnt->trans_mod->zc_request && len > 1024) {
		req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, &subreq->io_iter,
				       0, wreq->len, P9_ZC_HDR_SZ, "dqd",
				       fid->fid, start, len);
	} else {
		req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
				    start, len, &subreq->io_iter);
	}

	if (IS_ERR(req)) {
		netfs_write_subrequest_terminated(subreq, PTR_ERR(req), false);
		return;
	}

	err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &written);
	if (err) {
		trace_9p_protocol_dump(clnt, &req->rc);
		p9_req_put(clnt, req);
		netfs_write_subrequest_terminated(subreq, err, false);
		return;
	}

	if (written > len) {
		pr_err("bogus RWRITE count (%d > %u)\n", written, len);
		written = len;
	}

	p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);

	p9_req_put(clnt, req);
	netfs_write_subrequest_terminated(subreq, written, false);
}
EXPORT_SYMBOL(p9_client_write_subreq);

struct p9_wstat *p9_client_stat(struct p9_fid *fid)
{
	int err;
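
Note that p9_client_write_subreq() is only introduced and exported here; the address-space code above still issues writes through p9_client_write(), and the cutover in v9fs_init_request() remains behind #if 0. As a hedged sketch of the intended use, an ->issue_write() hook could hand the subrequest straight to this helper, since it terminates the subrequest itself on both the success and error paths; the v9fs_issue_write_direct name below is hypothetical and not part of the commit.

/* Hypothetical wiring, not part of this commit: drive the 9p TWRITE path
 * directly from a netfs ->issue_write() hook.  p9_client_write_subreq()
 * calls netfs_write_subrequest_terminated() itself, so nothing further is
 * needed here.
 */
static void v9fs_issue_write_direct(struct netfs_io_subrequest *subreq)
{
	p9_client_write_subreq(subreq);
}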