Commit 092cead6 authored by KOSAKI Motohiro, committed by Linus Torvalds

page allocator: move free_page_mlock() to page_alloc.c

Currently, free_page_mlock() is called only from page_alloc.c, so we can move it there.

Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b6e68bc1
mm/internal.h
@@ -150,18 +150,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-	__ClearPageMlocked(page);
-	__dec_zone_page_state(page, NR_MLOCK);
-	__count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -170,7 +158,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
 #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
mm/page_alloc.c
@@ -493,6 +493,22 @@ static inline void __free_one_page(struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+/*
+ * free_page_mlock() -- clean up attempts to free and mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+	__ClearPageMlocked(page);
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
+}
+#else
+static void free_page_mlock(struct page *page) { }
+#endif
+
 static inline int free_pages_check(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |
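The move keeps the existing pattern: the real helper is compiled only when CONFIG_HAVE_MLOCKED_PAGE_BIT is set, and an empty stub takes its place otherwise, so the free path in page_alloc.c can call free_page_mlock() unconditionally. Below is a minimal, self-contained userspace sketch of that config-guarded helper/stub pattern; the struct, the counter, the macro value, and the caller are illustrative stand-ins, not kernel code.

/* Minimal userspace sketch of the config-guarded helper + empty-stub
 * pattern used by free_page_mlock() above.  All names here are
 * illustrative stand-ins, not the kernel's real definitions. */
#include <stdio.h>

#define CONFIG_HAVE_MLOCKED_PAGE_BIT 1	/* assume the option is enabled */

struct page {
	int mlocked;		/* stand-in for the PG_mlocked page flag */
};

static int nr_mlock;		/* stand-in for the NR_MLOCK counter */

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/* Real version: undo the mlock accounting before the page is freed. */
static inline void free_page_mlock(struct page *page)
{
	if (page->mlocked) {
		page->mlocked = 0;
		nr_mlock--;
	}
}
#else
/* Stub: compiles to nothing, so callers need no #ifdef of their own. */
static inline void free_page_mlock(struct page *page) { }
#endif

int main(void)
{
	struct page p = { .mlocked = 1 };

	nr_mlock = 1;
	free_page_mlock(&p);	/* caller never checks the config option */
	printf("mlocked=%d nr_mlock=%d\n", p.mlocked, nr_mlock);
	return 0;
}

Because the stub is empty, the compiler drops the call entirely when the option is off, which is why the callers in page_alloc.c need no configuration checks of their own.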