Commit b68b10b6 authored by Linus Torvalds

Merge tag 'folio-5.17a' of git://git.infradead.org/users/willy/pagecache

Pull more folio updates from Matthew Wilcox:
 "Three small folio patches.

  One bug fix, one patch pulled forward from the patches destined for
  5.18 and then a patch to make use of that functionality"

* tag 'folio-5.17a' of git://git.infradead.org/users/willy/pagecache:
  filemap: Use folio_put_refs() in filemap_free_folio()
  mm: Add folio_put_refs()
  pagevec: Initialise folio_batch->percpu_pvec_drained
parents 369af20a 3abb28e2
@@ -1199,6 +1199,26 @@ static inline void folio_put(struct folio *folio)
 		__put_page(&folio->page);
 }
 
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately.  Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context.  May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
+{
+	if (folio_ref_sub_and_test(folio, refs))
+		__put_page(&folio->page);
+}
+
 static inline void put_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
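For context: a caller that holds one reference per page of a large folio can now drop them all with a single atomic operation instead of looping over folio_put(). The sketch below is illustrative only and not part of this commit; the release_folio_refs_*() helpers are hypothetical, while folio_put(), folio_put_refs() and folio_nr_pages() are the existing kernel APIs.

#include <linux/mm.h>

/* Hypothetical helper: the caller holds one reference per constituent page. */
static void release_folio_refs_old(struct folio *folio)
{
	long refs = folio_nr_pages(folio);

	/* Without folio_put_refs(): drop the references one at a time. */
	while (refs--)
		folio_put(folio);	/* the final put may free the folio */
}

static void release_folio_refs_new(struct folio *folio)
{
	/* With folio_put_refs(): one atomic subtraction does the same job. */
	folio_put_refs(folio, folio_nr_pages(folio));
}

The filemap_free_folio() change in the last hunk below follows the same pattern, with refs staying 1 for a normal single-page folio.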
@@ -111,6 +111,7 @@ static_assert(offsetof(struct pagevec, pages) ==
 static inline void folio_batch_init(struct folio_batch *fbatch)
 {
 	fbatch->nr = 0;
+	fbatch->percpu_pvec_drained = false;
 }
 
 static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
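The initialisation matters because struct folio_batch is deliberately layout-compatible with struct pagevec (see the static_assert in the hunk header above), so releasing a batch goes through the pagevec code, which reads percpu_pvec_drained. A rough usage sketch, assuming the 5.17-era folio_batch_add() and folio_batch_release() helpers from <linux/pagevec.h>; the gather_and_release() function and its arguments are hypothetical:

#include <linux/pagevec.h>

/* Hypothetical caller: batch up folios, then drop their references in bulk. */
static void gather_and_release(struct folio **folios, unsigned int count)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);	/* now also clears percpu_pvec_drained */

	for (i = 0; i < count; i++) {
		/* folio_batch_add() returns the space left; 0 means the batch is full. */
		if (!folio_batch_add(&fbatch, folios[i]))
			folio_batch_release(&fbatch);
	}

	/*
	 * folio_batch_release() casts the batch to a pagevec and releases it
	 * via pagevec_release(), the path that reads the percpu_pvec_drained
	 * field this patch initialises.
	 */
	folio_batch_release(&fbatch);
}

Without the added line, percpu_pvec_drained held whatever was on the stack, so the release path's one-time lru_add_drain() could be wrongly skipped.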
@@ -231,17 +231,15 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 {
 	void (*freepage)(struct page *);
+	int refs = 1;
 
 	freepage = mapping->a_ops->freepage;
 	if (freepage)
 		freepage(&folio->page);
 
-	if (folio_test_large(folio) && !folio_test_hugetlb(folio)) {
-		folio_ref_sub(folio, folio_nr_pages(folio));
-		VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
-	} else {
-		folio_put(folio);
-	}
+	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+		refs = folio_nr_pages(folio);
+	folio_put_refs(folio, refs);
 }
 
 /**