Commit c53954a0 authored by Mel Gorman, committed by Linus Torvalds

mm: remove lru parameter from __lru_cache_add and lru_cache_add_lru

Similar to __pagevec_lru_add, this patch removes the LRU parameter from
__lru_cache_add and lru_cache_add_lru as the caller does not control the
exact LRU the page gets added to.  lru_cache_add_lru gets renamed to
lru_cache_add as the name is silly without the lru parameter.  With the
parameter removed, it is required that the caller indicate if they want
the page added to the active or inactive list by setting or clearing
PageActive respectively.
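
For illustration only (not part of the patch), the new calling convention
amounts to the sketch below; the two helper functions are hypothetical:

#include <linux/swap.h>
#include <linux/page-flags.h>

/* Hypothetical sketch of the new convention: the caller picks the target
 * list via the PageActive flag, then calls lru_cache_add() without an lru.
 */
static void example_add_to_active_list(struct page *page)
{
        SetPageActive(page);            /* was lru_cache_add_lru(page, LRU_ACTIVE_ANON) */
        lru_cache_add(page);
}

static void example_add_to_inactive_list(struct page *page)
{
        ClearPageActive(page);          /* was __lru_cache_add(page, LRU_INACTIVE_FILE) */
        lru_cache_add(page);
}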

[akpm@linux-foundation.org: Suggested the patch]
[gang.chen@asianux.com: fix used-uninitialized warning]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alexey Lyahkov <alexey.lyashkov@gmail.com>
Cc: Andrew Perepechko <anserper@ya.ru>
Cc: Robin Dong <sanbai@taobao.com>
Cc: Theodore Tso <tytso@mit.edu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Bernd Schubert <bernd.schubert@fastmail.fm>
Cc: David Howells <dhowells@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a0b8cab3
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -10,6 +10,7 @@
 #include <linux/node.h>
 #include <linux/fs.h>
 #include <linux/atomic.h>
+#include <linux/page-flags.h>
 #include <asm/page.h>
 
 struct notifier_block;
@@ -233,8 +234,8 @@ extern unsigned long nr_free_pagecache_pages(void);
 
 /* linux/mm/swap.c */
-extern void __lru_cache_add(struct page *, enum lru_list lru);
-extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void __lru_cache_add(struct page *);
+extern void lru_cache_add(struct page *);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                               struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
@@ -254,12 +255,14 @@ extern void add_page_to_unevictable_list(struct page *page);
  */
 static inline void lru_cache_add_anon(struct page *page)
 {
-        __lru_cache_add(page, LRU_INACTIVE_ANON);
+        ClearPageActive(page);
+        __lru_cache_add(page);
 }
 
 static inline void lru_cache_add_file(struct page *page)
 {
-        __lru_cache_add(page, LRU_INACTIVE_FILE);
+        ClearPageActive(page);
+        __lru_cache_add(page);
 }
 
 /* linux/mm/vmscan.c */
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1093,9 +1093,10 @@ void page_add_new_anon_rmap(struct page *page,
         else
                 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
         __page_set_anon_rmap(page, vma, address, 1);
-        if (!mlocked_vma_newpage(vma, page))
-                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
-        else
+        if (!mlocked_vma_newpage(vma, page)) {
+                SetPageActive(page);
+                lru_cache_add(page);
+        } else
                 add_page_to_unevictable_list(page);
 }
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,15 +494,10 @@ EXPORT_SYMBOL(mark_page_accessed);
  * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
  * have the page added to the active list using mark_page_accessed().
  */
-void __lru_cache_add(struct page *page, enum lru_list lru)
+void __lru_cache_add(struct page *page)
 {
         struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
-        if (is_active_lru(lru))
-                SetPageActive(page);
-        else
-                ClearPageActive(page);
-
         page_cache_get(page);
         if (!pagevec_space(pvec))
                 __pagevec_lru_add(pvec);
@@ -512,11 +507,10 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 EXPORT_SYMBOL(__lru_cache_add);
 
 /**
- * lru_cache_add_lru - add a page to a page list
+ * lru_cache_add - add a page to a page list
  * @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
  */
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
+void lru_cache_add(struct page *page)
 {
         if (PageActive(page)) {
                 VM_BUG_ON(PageUnevictable(page));
@@ -525,7 +519,7 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
         }
 
         VM_BUG_ON(PageLRU(page));
-        __lru_cache_add(page, lru);
+        __lru_cache_add(page);
 }
 
 /**
@@ -745,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold)
                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
                 }
 
+                /* Clear Active bit in case of parallel mark_page_accessed */
+                ClearPageActive(page);
+
                 list_add(&page->lru, &pages_to_free);
         }
         if (zone)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -546,7 +546,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 void putback_lru_page(struct page *page)
 {
         int lru;
-        int active = !!TestClearPageActive(page);
         int was_unevictable = PageUnevictable(page);
 
         VM_BUG_ON(PageLRU(page));
@@ -561,8 +560,8 @@ void putback_lru_page(struct page *page)
                  * unevictable page on [in]active list.
                  * We know how to handle that.
                  */
-                lru = active + page_lru_base_type(page);
-                lru_cache_add_lru(page, lru);
+                lru = page_lru_base_type(page);
+                lru_cache_add(page);
         } else {
                 /*
                  * Put unevictable pages directly on zone's unevictable