Commit 202bc57b authored by David Howells's avatar David Howells

netfs: Don't use certain unnecessary folio_*() functions

Filesystems should use folio->index and folio->mapping, instead of
folio_index(folio), folio_mapping() and folio_file_mapping() since
they know that it's in the pagecache.

Change this automagically with:

perl -p -i -e 's/folio_mapping[(]([^)]*)[)]/\1->mapping/g' fs/netfs/*.c
perl -p -i -e 's/folio_file_mapping[(]([^)]*)[)]/\1->mapping/g' fs/netfs/*.c
perl -p -i -e 's/folio_index[(]([^)]*)[)]/\1->index/g' fs/netfs/*.c
Reported-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-cifs@vger.kernel.org
cc: linux-erofs@lists.ozlabs.org
cc: linux-fsdevel@vger.kernel.org
parent 6613476e
...@@ -101,7 +101,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) ...@@ -101,7 +101,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
} }
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio_index(folio) == rreq->no_unlock_folio && if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
_debug("no unlock"); _debug("no unlock");
else else
...@@ -246,13 +246,13 @@ EXPORT_SYMBOL(netfs_readahead); ...@@ -246,13 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
*/ */
int netfs_read_folio(struct file *file, struct folio *folio) int netfs_read_folio(struct file *file, struct folio *folio)
{ {
struct address_space *mapping = folio_file_mapping(folio); struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq; struct netfs_io_request *rreq;
struct netfs_inode *ctx = netfs_inode(mapping->host); struct netfs_inode *ctx = netfs_inode(mapping->host);
struct folio *sink = NULL; struct folio *sink = NULL;
int ret; int ret;
_enter("%lx", folio_index(folio)); _enter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file, rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio), folio_file_pos(folio), folio_size(folio),
...@@ -460,7 +460,7 @@ int netfs_write_begin(struct netfs_inode *ctx, ...@@ -460,7 +460,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
ret = PTR_ERR(rreq); ret = PTR_ERR(rreq);
goto error; goto error;
} }
rreq->no_unlock_folio = folio_index(folio); rreq->no_unlock_folio = folio->index;
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
ret = netfs_begin_cache_read(rreq, ctx); ret = netfs_begin_cache_read(rreq, ctx);
...@@ -518,7 +518,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio, ...@@ -518,7 +518,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len) size_t offset, size_t len)
{ {
struct netfs_io_request *rreq; struct netfs_io_request *rreq;
struct address_space *mapping = folio_file_mapping(folio); struct address_space *mapping = folio->mapping;
struct netfs_inode *ctx = netfs_inode(mapping->host); struct netfs_inode *ctx = netfs_inode(mapping->host);
unsigned long long start = folio_pos(folio); unsigned long long start = folio_pos(folio);
size_t flen = folio_size(folio); size_t flen = folio_size(folio);
...@@ -535,7 +535,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio, ...@@ -535,7 +535,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
goto error; goto error;
} }
rreq->no_unlock_folio = folio_index(folio); rreq->no_unlock_folio = folio->index;
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
ret = netfs_begin_cache_read(rreq, ctx); ret = netfs_begin_cache_read(rreq, ctx);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
......
...@@ -343,7 +343,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, ...@@ -343,7 +343,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
break; break;
default: default:
WARN(true, "Unexpected modify type %u ix=%lx\n", WARN(true, "Unexpected modify type %u ix=%lx\n",
howto, folio_index(folio)); howto, folio->index);
ret = -EIO; ret = -EIO;
goto error_folio_unlock; goto error_folio_unlock;
} }
...@@ -648,7 +648,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq) ...@@ -648,7 +648,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
xas_for_each(&xas, folio, last) { xas_for_each(&xas, folio, last) {
WARN(!folio_test_writeback(folio), WARN(!folio_test_writeback(folio),
"bad %zx @%llx page %lx %lx\n", "bad %zx @%llx page %lx %lx\n",
wreq->len, wreq->start, folio_index(folio), last); wreq->len, wreq->start, folio->index, last);
if ((finfo = netfs_folio_info(folio))) { if ((finfo = netfs_folio_info(folio))) {
/* Streaming writes cannot be redirtied whilst under /* Streaming writes cannot be redirtied whilst under
...@@ -795,7 +795,7 @@ static void netfs_extend_writeback(struct address_space *mapping, ...@@ -795,7 +795,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
continue; continue;
if (xa_is_value(folio)) if (xa_is_value(folio))
break; break;
if (folio_index(folio) != index) { if (folio->index != index) {
xas_reset(xas); xas_reset(xas);
break; break;
} }
...@@ -901,7 +901,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, ...@@ -901,7 +901,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
long count = wbc->nr_to_write; long count = wbc->nr_to_write;
int ret; int ret;
_enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching); _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio), wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
NETFS_WRITEBACK); NETFS_WRITEBACK);
...@@ -1047,7 +1047,7 @@ static ssize_t netfs_writepages_begin(struct address_space *mapping, ...@@ -1047,7 +1047,7 @@ static ssize_t netfs_writepages_begin(struct address_space *mapping,
start = folio_pos(folio); /* May regress with THPs */ start = folio_pos(folio); /* May regress with THPs */
_debug("wback %lx", folio_index(folio)); _debug("wback %lx", folio->index);
/* At this point we hold neither the i_pages lock nor the page lock: /* At this point we hold neither the i_pages lock nor the page lock:
* the page may be truncated or invalidated (changing page->mapping to * the page may be truncated or invalidated (changing page->mapping to
......
...@@ -124,7 +124,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq, ...@@ -124,7 +124,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
/* We might have multiple writes from the same huge /* We might have multiple writes from the same huge
* folio, but we mustn't unlock a folio more than once. * folio, but we mustn't unlock a folio more than once.
*/ */
if (have_unlocked && folio_index(folio) <= unlocked) if (have_unlocked && folio->index <= unlocked)
continue; continue;
unlocked = folio_next_index(folio) - 1; unlocked = folio_next_index(folio) - 1;
trace_netfs_folio(folio, netfs_folio_trace_end_copy); trace_netfs_folio(folio, netfs_folio_trace_end_copy);
......
...@@ -180,7 +180,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) ...@@ -180,7 +180,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
struct netfs_folio *finfo = NULL; struct netfs_folio *finfo = NULL;
size_t flen = folio_size(folio); size_t flen = folio_size(folio);
_enter("{%lx},%zx,%zx", folio_index(folio), offset, length); _enter("{%lx},%zx,%zx", folio->index, offset, length);
folio_wait_fscache(folio); folio_wait_fscache(folio);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment