Commit 6e1ca48d authored by Vishal Moola (Oracle), committed by Andrew Morton

folio-compat: remove lru_cache_add()

There are no longer any callers of lru_cache_add(), so remove it.  This
saves 79 bytes of kernel text.  Also clean up some comments so that they
reference the new folio_add_lru() instead.
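
Any remaining out-of-tree caller can be converted by open-coding what the
removed compat wrapper did: look up the page's folio with page_folio() and
hand that to folio_add_lru().  A minimal before/after sketch (the variable
page stands for whatever struct page the caller already holds):

	/* Before: went through the folio-compat wrapper. */
	lru_cache_add(page);

	/*
	 * After: convert to the containing folio and add it to the LRU,
	 * which is exactly what the wrapper did internally.
	 */
	folio_add_lru(page_folio(page));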

Link: https://lkml.kernel.org/r/20221101175326.13265-6-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 284a344e
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -381,7 +381,6 @@ void lru_note_cost(struct lruvec *lruvec, bool file,
 void lru_note_cost_refault(struct folio *);
 void folio_add_lru(struct folio *);
 void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
-void lru_cache_add(struct page *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);
...
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -76,12 +76,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
-void lru_cache_add(struct page *page)
-{
-	folio_add_lru(page_folio(page));
-}
-EXPORT_SYMBOL(lru_cache_add);
-
 void lru_cache_add_inactive_or_unevictable(struct page *page,
 		struct vm_area_struct *vma)
 {
...
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
  * refcount. We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave pages behind because
  * shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the lru_cache_add() pagevecs.
+ * sitting in the folio_add_lru() pagevecs.
  */
 static int invalidate_complete_folio2(struct address_space *mapping,
 		struct folio *folio)
...
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -492,7 +492,10 @@ void workingset_refault(struct folio *folio, void *shadow)
 	/* Folio was active prior to eviction */
 	if (workingset) {
 		folio_set_workingset(folio);
-		/* XXX: Move to lru_cache_add() when it supports new vs putback */
+		/*
+		 * XXX: Move to folio_add_lru() when it supports new vs
+		 * putback
+		 */
 		lru_note_cost_refault(folio);
 		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
 	}
...