Commit b5934c53 authored by Hugh Dickins, committed by Linus Torvalds

mm: add_active_or_unevictable into rmap

lru_cache_add_active_or_unevictable() and page_add_new_anon_rmap() always
appear together.  Save some symbol table space and some jumping around by
removing lru_cache_add_active_or_unevictable(), folding its code into
page_add_new_anon_rmap(): like how we add file pages to lru just after
adding them to page cache.

Remove the nearby "TODO: is this safe?" comments (yes, it is safe), and
change page_add_new_anon_rmap()'s address BUG_ON to VM_BUG_ON as
originally intended.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 51726b12
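
For reference, a sketch of page_add_new_anon_rmap() as it reads in mm/rmap.c after this fold (reconstructed from the hunk below); the if/else tail is the body of the removed lru_cache_add_active_or_unevictable():

	void page_add_new_anon_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
	{
		VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
		__page_set_anon_rmap(page, vma, address);
		/* LRU placement, formerly a separate call at every call site */
		if (page_evictable(page, vma))
			lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
		else
			add_page_to_unevictable_list(page);
	}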
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -174,8 +174,6 @@ extern unsigned int nr_free_pagecache_pages(void);
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_cache_add_active_or_unevictable(struct page *,
-					struct vm_area_struct *);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1949,10 +1949,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		ptep_clear_flush_notify(vma, address, page_table);
 		SetPageSwapBacked(new_page);
-		lru_cache_add_active_or_unevictable(new_page, vma);
 		page_add_new_anon_rmap(new_page, vma, address);
-
-//TODO: is this safe? do_anonymous_page() does it this way.
 		set_pte_at(mm, address, page_table, entry);
 		update_mmu_cache(vma, address, entry);
 		if (old_page) {
@@ -2448,7 +2445,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto release;
 	inc_mm_counter(mm, anon_rss);
 	SetPageSwapBacked(page);
-	lru_cache_add_active_or_unevictable(page, vma);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
@@ -2597,7 +2593,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (anon) {
 		inc_mm_counter(mm, anon_rss);
 		SetPageSwapBacked(page);
-		lru_cache_add_active_or_unevictable(page, vma);
 		page_add_new_anon_rmap(page, vma, address);
 	} else {
 		inc_mm_counter(mm, file_rss);
@@ -2607,7 +2602,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			get_page(dirty_page);
 		}
 	}
-//TODO: is this safe? do_anonymous_page() does it this way.
 	set_pte_at(mm, address, page_table, entry);

 	/* no need to invalidate: a not-present page won't be cached */
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -47,6 +47,7 @@
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
+#include <linux/mm_inline.h>
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
@@ -671,9 +672,13 @@ void page_add_anon_rmap(struct page *page,
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
-	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
 }

 /**
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -246,25 +246,6 @@ void add_page_to_unevictable_list(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }

-/**
- * lru_cache_add_active_or_unevictable
- * @page: the page to be added to LRU
- * @vma: vma in which page is mapped for determining reclaimability
- *
- * place @page on active or unevictable LRU list, depending on
- * page_evictable(). Note that if the page is not evictable,
- * it goes directly back onto it's zone's unevictable list. It does
- * NOT use a per cpu pagevec.
- */
-void lru_cache_add_active_or_unevictable(struct page *page,
-					struct vm_area_struct *vma)
-{
-	if (page_evictable(page, vma))
-		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
-	else
-		add_page_to_unevictable_list(page);
-}
-
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
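
With the LRU hand-off now inside the rmap call, an anonymous-fault call site reduces to the short sequence below (a sketch following the do_anonymous_page() hunk above):

	inc_mm_counter(mm, anon_rss);
	SetPageSwapBacked(page);
	page_add_new_anon_rmap(page, vma, address);	/* now also puts the page on the LRU */
	set_pte_at(mm, address, page_table, entry);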