Commit b4fa966f authored by David Howells, committed by Andrew Morton

mm, netfs, fscache: stop read optimisation when folio removed from pagecache

Fscache has an optimisation by which reads from the cache are skipped
until we know that (a) there's data there to be read and (b) that data
isn't entirely covered by pages resident in the netfs pagecache.  This is
done with two flags manipulated by fscache_note_page_release():

	if (...
	    test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
	    test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);

where the NO_DATA_TO_READ flag causes cachefiles_prepare_read() to
indicate that netfslib should download from the server or clear the page
instead.
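
The effect on the read path is roughly the following (a sketch of the
relevant check in cachefiles_prepare_read(), not the exact code):

	if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
		return NETFS_DOWNLOAD_FROM_SERVER; /* or NETFS_FILL_WITH_ZEROES */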

The fscache_note_page_release() function is intended to be called from
->releasepage() - but that only gets called if PG_private or PG_private_2
is set - and currently the former is at the discretion of the network
filesystem and the latter is only set whilst a page is being written to
the cache, so sometimes we miss clearing the optimisation.
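
For reference, a netfs ->release_folio() handler typically does something
like the following (hypothetical myfs_* names, for illustration only):

	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Can't release whilst the folio is being written to the cache. */
		if (folio_test_private_2(folio))
			return false;
		/* myfs_inode_cookie() is a hypothetical cookie accessor. */
		fscache_note_page_release(myfs_inode_cookie(folio->mapping->host));
		return true;
	}

If neither PG_private nor PG_private_2 is set, this handler is never
reached at all, which is the hole being plugged here.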

Fix this by following Willy's suggestion[1] and adding an address_space
flag, AS_RELEASE_ALWAYS, that causes filemap_release_folio() to always call
->release_folio() if it's set, even if PG_private or PG_private_2 aren't
set.
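
With the flag in place, filemap_release_folio() can perform the check
itself, along these lines (a sketch, not the exact final code):

	bool filemap_release_folio(struct folio *folio, gfp_t gfp)
	{
		struct address_space * const mapping = folio->mapping;

		if (!folio_needs_release(folio))
			return true;
		if (folio_test_writeback(folio))
			return false;

		if (mapping && mapping->a_ops->release_folio)
			return mapping->a_ops->release_folio(folio, gfp);
		return try_to_free_buffers(folio);
	}

folio_needs_release() is the helper added to mm/internal.h below.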

Note that this would require folio_test_private() and page_has_private() to
become more complicated.  To avoid that, in the places[*] where these are
used to conditionalise calls to filemap_release_folio() and
try_to_release_page(), the tests are removed, those functions are called
unconditionally and the test is performed inside them instead.

[*] There are some exceptions in vmscan.c where the check guards more than
just a call to the releaser.  I've added a function, folio_needs_release(),
to wrap all the checks for that.
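
In vmscan.c that wrapper then guards the whole cleanup sequence, roughly
like so (sketch only):

	if (folio_needs_release(folio)) {
		if (!filemap_release_folio(folio, sc->gfp_mask))
			goto activate_locked;
		...
	}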

AS_RELEASE_ALWAYS should be set if a non-NULL cookie is obtained from
fscache and cleared in ->evict_inode() before truncate_inode_pages_final()
is called.
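
A filesystem's eviction path would then look something like this
(hypothetical myfs_evict_inode(), for illustration):

	static void myfs_evict_inode(struct inode *inode)
	{
		/* Stop forcing ->release_folio() before the final truncation. */
		mapping_clear_release_always(inode->i_mapping);
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}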

Additionally, the FSCACHE_COOKIE_NO_DATA_TO_READ flag needs to be cleared
and the optimisation cancelled if a cachefiles object already contains data
when we open it.
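
The cachefiles_open_file() hunk below does exactly that:

	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);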

[dwysocha@redhat.com: call folio_mapping() inside folio_needs_release()]
  Link: https://github.com/DaveWysochanskiRH/kernel/commit/902c990e311120179fa5de99d68364b2947b79ec
Link: https://lkml.kernel.org/r/20230628104852.3391651-3-dhowells@redhat.com
Fixes: 1f67e6d0 ("fscache: Provide a function to note the release of a page")
Fixes: 047487c9 ("cachefiles: Implement the I/O routines")
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
Reported-by: Rohith Surabattula <rohiths.msft@gmail.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Tested-by: SeongJae Park <sj@kernel.org>
Cc: Daire Byrne <daire.byrne@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Steve French <sfrench@samba.org>
Cc: Shyam Prasad N <nspmangalore@gmail.com>
Cc: Rohith Surabattula <rohiths.msft@gmail.com>
Cc: Dave Wysochanski <dwysocha@redhat.com>
Cc: Dominique Martinet <asmadeus@codewreck.org>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jingbo Xu <jefflexu@linux.alibaba.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0201ebf2
@@ -68,6 +68,8 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
 				       &path, sizeof(path),
 				       &version, sizeof(version),
 				       i_size_read(&v9inode->netfs.inode));
+	if (v9inode->netfs.cache)
+		mapping_set_release_always(inode->i_mapping);
 
 	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
 		 inode, v9fs_inode_cookie(v9inode));
......
@@ -681,6 +681,8 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
 {
 #ifdef CONFIG_AFS_FSCACHE
 	vnode->netfs.cache = cookie;
+	if (cookie)
+		mapping_set_release_always(vnode->netfs.inode.i_mapping);
 #endif
 }
 
......
@@ -585,6 +585,8 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
 	if (ret < 0)
 		goto check_failed;
 
+	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);
+
 	object->file = file;
 
 	/* Always update the atime on an object we've just looked up (this is
......
@@ -36,6 +36,8 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
 					&ci->i_vino, sizeof(ci->i_vino),
 					&ci->i_version, sizeof(ci->i_version),
 					i_size_read(inode));
+	if (ci->netfs.cache)
+		mapping_set_release_always(inode->i_mapping);
 }
 
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
......
@@ -180,6 +180,9 @@ void nfs_fscache_init_inode(struct inode *inode)
 					       &auxdata,      /* aux_data */
 					       sizeof(auxdata),
 					       i_size_read(inode));
+
+	if (netfs_inode(inode)->cache)
+		mapping_set_release_always(inode->i_mapping);
 }
 
 /*
......
@@ -108,6 +108,8 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
 					       &cifsi->uniqueid, sizeof(cifsi->uniqueid),
 					       &cd, sizeof(cd),
 					       i_size_read(&cifsi->netfs.inode));
+	if (cifsi->netfs.cache)
+		mapping_set_release_always(inode->i_mapping);
 }
 
 void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
......
@@ -203,6 +203,7 @@ enum mapping_flags {
 	/* writeback related tags are not used */
 	AS_NO_WRITEBACK_TAGS = 5,
 	AS_LARGE_FOLIO_SUPPORT = 6,
+	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
 };
 
 /**
@@ -273,6 +274,21 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping)
 	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
 }
 
+static inline bool mapping_release_always(const struct address_space *mapping)
+{
+	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_set_release_always(struct address_space *mapping)
+{
+	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_clear_release_always(struct address_space *mapping)
+{
+	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
 	return mapping->gfp_mask;
......
@@ -181,7 +181,10 @@ static inline void set_page_refcounted(struct page *page)
  */
 static inline bool folio_needs_release(struct folio *folio)
 {
-	return folio_has_private(folio);
+	struct address_space *mapping = folio_mapping(folio);
+
+	return folio_has_private(folio) ||
+		(mapping && mapping_release_always(mapping));
 }
 
 extern unsigned long highest_memmap_pfn;
......