mm: Add folio_put()

If we know we have a folio, we can call folio_put() instead of put_page()
and save the overhead of calling compound_head().  Also skips the
devmap checks.

This commit looks like it should be a no-op, but actually saves 684 bytes
of text with the distro-derived config that I'm testing.  Some functions
grow a little while others shrink.  I presume the compiler is making
different inlining decisions.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
parent c24016ac
@@ -748,6 +748,11 @@ static inline int put_page_testzero(struct page *page)
	return page_ref_dec_and_test(page);
}
/*
 * folio_put_testzero - Drop a reference on a folio, testing for zero.
 * @folio: The folio.
 *
 * Decrements the folio's reference count (via its head page, so no
 * compound_head() lookup is needed) and returns true if that put
 * dropped the count to zero.
 */
static inline int folio_put_testzero(struct folio *folio)
{
return put_page_testzero(&folio->page);
}
/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
@@ -1247,9 +1252,28 @@ static inline __must_check bool try_get_page(struct page *page)
	return true;
}
/**
 * folio_put - Decrement the reference count on a folio.
 * @folio: The folio.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately. Do not access the memory or the struct folio
 * after calling folio_put() unless you can be sure that it wasn't the
 * last reference.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context. May be called while holding a spinlock.
 */
static inline void folio_put(struct folio *folio)
{
if (folio_put_testzero(folio))
/* Last reference dropped: release the folio back to the allocator. */
__put_page(&folio->page);
}
static inline void put_page(struct page *page) static inline void put_page(struct page *page)
{ {
page = compound_head(page); struct folio *folio = page_folio(page);
/* /*
* For devmap managed pages we need to catch refcount transition from * For devmap managed pages we need to catch refcount transition from
...@@ -1257,13 +1281,12 @@ static inline void put_page(struct page *page) ...@@ -1257,13 +1281,12 @@ static inline void put_page(struct page *page)
* need to inform the device driver through callback. See * need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details. * include/linux/memremap.h and HMM for details.
*/ */
if (page_is_devmap_managed(page)) { if (page_is_devmap_managed(&folio->page)) {
put_devmap_managed_page(page); put_devmap_managed_page(&folio->page);
return; return;
} }
if (put_page_testzero(page)) folio_put(folio);
__put_page(page);
} }
/*
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment