Commit 709fe09e authored by Jingbo Xu, committed by Gao Xiang

erofs: switch to prepare_ondemand_read() in fscache mode

Switch to the prepare_ondemand_read() interface and a self-contained request
completion to get rid of netfs_io_[request|subrequest].

The whole request is still split into slices (subrequests) according to the
cache state of the backing file.  If any of the subrequests fails, the whole
request is marked as failed.
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20221124034212.81892-3-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent 86692475
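
The completion model this patch switches to boils down to a single refcounted request: the submitter holds one reference, every slice handed to fscache_read() takes another, the per-slice callback records any error and drops its reference, and whichever put drops the last reference runs the completion (folios unlocked, cache operation ended). The following is a minimal userspace sketch of that pattern only, not kernel code; the names (demo_request, demo_req_put, demo_slice_done) and the simulated slice results are hypothetical.

/* Minimal userspace sketch of the refcounted completion pattern adopted by
 * this patch (hypothetical demo_* names; C11 stdatomic stands in for the
 * kernel's refcount_t). Build with: cc -std=c11 demo.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_request {
        atomic_int ref;         /* one ref for the submitter, plus one per in-flight slice */
        int error;              /* first error observed, or 0 */
};

static void demo_req_complete(struct demo_request *req)
{
        /* In the kernel this is where folios would be marked uptodate
         * (unless req->error is set), unlocked, and the cache operation
         * ended. */
        printf("request complete, error=%d\n", req->error);
        free(req);
}

static void demo_req_put(struct demo_request *req)
{
        if (atomic_fetch_sub(&req->ref, 1) == 1)
                demo_req_complete(req);         /* last reference dropped */
}

/* Per-slice completion callback: record any error, then drop the slice's reference. */
static void demo_slice_done(struct demo_request *req, long ret)
{
        if (ret < 0)
                req->error = (int)ret;
        demo_req_put(req);
}

int main(void)
{
        struct demo_request *req = calloc(1, sizeof(*req));

        if (!req)
                return 1;
        atomic_init(&req->ref, 1);              /* the submitter's reference */

        /* Submit three slices; each takes a reference before dispatch.
         * The second slice "fails", which marks the whole request failed. */
        for (int i = 0; i < 3; i++) {
                atomic_fetch_add(&req->ref, 1);
                demo_slice_done(req, i == 1 ? -5L /* -EIO */ : 4096L);
        }

        demo_req_put(req);                      /* drop the submitter's reference */
        return 0;
}

In the diff below, erofs_fscache_req_put()/erofs_fscache_req_complete() play the same roles using refcount_t, and erofs_fscache_subreq_complete() is the per-slice callback passed to fscache_read().
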
@@ -11,257 +11,180 @@ static DEFINE_MUTEX(erofs_domain_cookies_lock);
 static LIST_HEAD(erofs_domain_list);
 static struct vfsmount *erofs_pseudo_mnt;
 
-static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
+struct erofs_fscache_request {
+        struct netfs_cache_resources cache_resources;
+        struct address_space *mapping;  /* The mapping being accessed */
+        loff_t start;                   /* Start position */
+        size_t len;                     /* Length of the request */
+        size_t submitted;               /* Length of submitted */
+        short error;                    /* 0 or error that occurred */
+        refcount_t ref;
+};
+
+static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping,
                                              loff_t start, size_t len)
 {
-        struct netfs_io_request *rreq;
+        struct erofs_fscache_request *req;
 
-        rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
-        if (!rreq)
+        req = kzalloc(sizeof(struct erofs_fscache_request), GFP_KERNEL);
+        if (!req)
                 return ERR_PTR(-ENOMEM);
 
-        rreq->start   = start;
-        rreq->len     = len;
-        rreq->mapping = mapping;
-        rreq->inode   = mapping->host;
-        INIT_LIST_HEAD(&rreq->subrequests);
-        refcount_set(&rreq->ref, 1);
-        return rreq;
-}
-
-static void erofs_fscache_put_request(struct netfs_io_request *rreq)
-{
-        if (!refcount_dec_and_test(&rreq->ref))
-                return;
-        if (rreq->cache_resources.ops)
-                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
-        kfree(rreq);
-}
-
-static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
-{
-        if (!refcount_dec_and_test(&subreq->ref))
-                return;
-        erofs_fscache_put_request(subreq->rreq);
-        kfree(subreq);
-}
-
-static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
-{
-        struct netfs_io_subrequest *subreq;
-
-        while (!list_empty(&rreq->subrequests)) {
-                subreq = list_first_entry(&rreq->subrequests,
-                                struct netfs_io_subrequest, rreq_link);
-                list_del(&subreq->rreq_link);
-                erofs_fscache_put_subrequest(subreq);
-        }
+        req->mapping = mapping;
+        req->start   = start;
+        req->len     = len;
+        refcount_set(&req->ref, 1);
+        return req;
 }
 
-static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
+static void erofs_fscache_req_complete(struct erofs_fscache_request *req)
 {
-        struct netfs_io_subrequest *subreq;
         struct folio *folio;
-        unsigned int iopos = 0;
-        pgoff_t start_page = rreq->start / PAGE_SIZE;
-        pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
-        bool subreq_failed = false;
+        bool failed = req->error;
+        pgoff_t start_page = req->start / PAGE_SIZE;
+        pgoff_t last_page = ((req->start + req->len) / PAGE_SIZE) - 1;
 
-        XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+        XA_STATE(xas, &req->mapping->i_pages, start_page);
 
-        subreq = list_first_entry(&rreq->subrequests,
-                                  struct netfs_io_subrequest, rreq_link);
-        subreq_failed = (subreq->error < 0);
-
         rcu_read_lock();
         xas_for_each(&xas, folio, last_page) {
-                unsigned int pgpos, pgend;
-                bool pg_failed = false;
-
                 if (xas_retry(&xas, folio))
                         continue;
-
-                pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-                pgend = pgpos + folio_size(folio);
-
-                for (;;) {
-                        if (!subreq) {
-                                pg_failed = true;
-                                break;
-                        }
-
-                        pg_failed |= subreq_failed;
-                        if (pgend < iopos + subreq->len)
-                                break;
-
-                        iopos += subreq->len;
-                        if (!list_is_last(&subreq->rreq_link,
-                                          &rreq->subrequests)) {
-                                subreq = list_next_entry(subreq, rreq_link);
-                                subreq_failed = (subreq->error < 0);
-                        } else {
-                                subreq = NULL;
-                                subreq_failed = false;
-                        }
-
-                        if (pgend == iopos)
-                                break;
-                }
-
-                if (!pg_failed)
+                if (!failed)
                         folio_mark_uptodate(folio);
-
                 folio_unlock(folio);
         }
         rcu_read_unlock();
-}
 
-static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
-{
-        erofs_fscache_rreq_unlock_folios(rreq);
-        erofs_fscache_clear_subrequests(rreq);
-        erofs_fscache_put_request(rreq);
+        if (req->cache_resources.ops)
+                req->cache_resources.ops->end_operation(&req->cache_resources);
+
+        kfree(req);
 }
 
-static void erofc_fscache_subreq_complete(void *priv,
+static void erofs_fscache_req_put(struct erofs_fscache_request *req)
+{
+        if (refcount_dec_and_test(&req->ref))
+                erofs_fscache_req_complete(req);
+}
+
+static void erofs_fscache_subreq_complete(void *priv,
                 ssize_t transferred_or_error, bool was_async)
 {
-        struct netfs_io_subrequest *subreq = priv;
-        struct netfs_io_request *rreq = subreq->rreq;
+        struct erofs_fscache_request *req = priv;
 
         if (IS_ERR_VALUE(transferred_or_error))
-                subreq->error = transferred_or_error;
-
-        if (atomic_dec_and_test(&rreq->nr_outstanding))
-                erofs_fscache_rreq_complete(rreq);
-
-        erofs_fscache_put_subrequest(subreq);
+                req->error = transferred_or_error;
+        erofs_fscache_req_put(req);
 }
 
 /*
- * Read data from fscache and fill the read data into page cache described by
- * @rreq, which shall be both aligned with PAGE_SIZE. @pstart describes
- * the start physical address in the cache file.
+ * Read data from fscache (cookie, pstart, len), and fill the read data into
+ * page cache described by (req->mapping, lstart, len). @pstart describes the
+ * start physical address in the cache file.
  */
 static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
-                                struct netfs_io_request *rreq, loff_t pstart)
+                struct erofs_fscache_request *req, loff_t pstart, size_t len)
 {
         enum netfs_io_source source;
-        struct super_block *sb = rreq->mapping->host->i_sb;
-        struct netfs_io_subrequest *subreq;
-        struct netfs_cache_resources *cres = &rreq->cache_resources;
+        struct super_block *sb = req->mapping->host->i_sb;
+        struct netfs_cache_resources *cres = &req->cache_resources;
         struct iov_iter iter;
-        loff_t start = rreq->start;
-        size_t len = rreq->len;
+        loff_t lstart = req->start + req->submitted;
         size_t done = 0;
         int ret;
 
-        atomic_set(&rreq->nr_outstanding, 1);
+        DBG_BUGON(len > req->len - req->submitted);
 
         ret = fscache_begin_read_operation(cres, cookie);
         if (ret)
-                goto out;
+                return ret;
 
         while (done < len) {
-                subreq = kzalloc(sizeof(struct netfs_io_subrequest),
-                                 GFP_KERNEL);
-                if (subreq) {
-                        INIT_LIST_HEAD(&subreq->rreq_link);
-                        refcount_set(&subreq->ref, 2);
-                        subreq->rreq = rreq;
-                        refcount_inc(&rreq->ref);
-                } else {
-                        ret = -ENOMEM;
-                        goto out;
-                }
-
-                subreq->start = pstart + done;
-                subreq->len   = len - done;
-                subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
+                loff_t sstart = pstart + done;
+                size_t slen = len - done;
+                unsigned long flags = 1 << NETFS_SREQ_ONDEMAND;
 
-                list_add_tail(&subreq->rreq_link, &rreq->subrequests);
-
-                source = cres->ops->prepare_read(subreq, LLONG_MAX);
-                if (WARN_ON(subreq->len == 0))
+                source = cres->ops->prepare_ondemand_read(cres,
+                                sstart, &slen, LLONG_MAX, &flags, 0);
+                if (WARN_ON(slen == 0))
                         source = NETFS_INVALID_READ;
                 if (source != NETFS_READ_FROM_CACHE) {
-                        erofs_err(sb, "failed to fscache prepare_read (source %d)",
-                                  source);
-                        ret = -EIO;
-                        subreq->error = ret;
-                        erofs_fscache_put_subrequest(subreq);
-                        goto out;
+                        erofs_err(sb, "failed to fscache prepare_read (source %d)", source);
+                        return -EIO;
                 }
 
-                atomic_inc(&rreq->nr_outstanding);
+                refcount_inc(&req->ref);
+                iov_iter_xarray(&iter, READ, &req->mapping->i_pages,
+                                lstart + done, slen);
 
-                iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
-                                start + done, subreq->len);
-
-                ret = fscache_read(cres, subreq->start, &iter,
-                                   NETFS_READ_HOLE_FAIL,
-                                   erofc_fscache_subreq_complete, subreq);
+                ret = fscache_read(cres, sstart, &iter, NETFS_READ_HOLE_FAIL,
+                                   erofs_fscache_subreq_complete, req);
                 if (ret == -EIOCBQUEUED)
                         ret = 0;
                 if (ret) {
                         erofs_err(sb, "failed to fscache_read (ret %d)", ret);
-                        goto out;
+                        return ret;
                 }
 
-                done += subreq->len;
+                done += slen;
         }
-out:
-        if (atomic_dec_and_test(&rreq->nr_outstanding))
-                erofs_fscache_rreq_complete(rreq);
-
-        return ret;
+        DBG_BUGON(done != len);
+        req->submitted += len;
+        return 0;
 }
 
 static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
 {
         int ret;
         struct super_block *sb = folio_mapping(folio)->host->i_sb;
-        struct netfs_io_request *rreq;
+        struct erofs_fscache_request *req;
         struct erofs_map_dev mdev = {
                 .m_deviceid = 0,
                 .m_pa = folio_pos(folio),
         };
 
         ret = erofs_map_dev(sb, &mdev);
-        if (ret)
-                goto out;
+        if (ret) {
+                folio_unlock(folio);
+                return ret;
+        }
 
-        rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+        req = erofs_fscache_req_alloc(folio_mapping(folio),
                                 folio_pos(folio), folio_size(folio));
-        if (IS_ERR(rreq)) {
-                ret = PTR_ERR(rreq);
-                goto out;
+        if (IS_ERR(req)) {
+                folio_unlock(folio);
+                return PTR_ERR(req);
         }
-        return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
-                                rreq, mdev.m_pa);
-out:
-        folio_unlock(folio);
+
+        ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+                                req, mdev.m_pa, folio_size(folio));
+        if (ret)
+                req->error = ret;
+
+        erofs_fscache_req_put(req);
         return ret;
 }
 
 /*
  * Read into page cache in the range described by (@pos, @len).
  *
- * On return, the caller is responsible for page unlocking if the output @unlock
- * is true, or the callee will take this responsibility through netfs_io_request
- * interface.
+ * On return, if the output @unlock is true, the caller is responsible for page
+ * unlocking; otherwise the callee will take this responsibility through request
+ * completion.
  *
  * The return value is the number of bytes successfully handled, or negative
  * error code on failure. The only exception is that, the length of the range
- * instead of the error code is returned on failure after netfs_io_request is
- * allocated, so that .readahead() could advance rac accordingly.
+ * instead of the error code is returned on failure after request is allocated,
+ * so that .readahead() could advance rac accordingly.
  */
 static int erofs_fscache_data_read(struct address_space *mapping,
                                    loff_t pos, size_t len, bool *unlock)
 {
         struct inode *inode = mapping->host;
         struct super_block *sb = inode->i_sb;
-        struct netfs_io_request *rreq;
+        struct erofs_fscache_request *req;
         struct erofs_map_blocks map;
         struct erofs_map_dev mdev;
         struct iov_iter iter;
@@ -318,13 +241,17 @@ static int erofs_fscache_data_read(struct address_space *mapping,
         if (ret)
                 return ret;
 
-        rreq = erofs_fscache_alloc_request(mapping, pos, count);
-        if (IS_ERR(rreq))
-                return PTR_ERR(rreq);
+        req = erofs_fscache_req_alloc(mapping, pos, count);
+        if (IS_ERR(req))
+                return PTR_ERR(req);
 
         *unlock = false;
-        erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
-                        rreq, mdev.m_pa + (pos - map.m_la));
+        ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+                        req, mdev.m_pa + (pos - map.m_la), count);
+        if (ret)
+                req->error = ret;
+
+        erofs_fscache_req_put(req);
         return count;
 }