Commit eed29d66 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] pagemap_lru_lock wrapup

Some fallout from the pagemap_lru_lock changes:

- lru_cache_del() is no longer used.  Kill it.

- page_cache_release() almost never actually frees pages.  So inline
  page_cache_release() and move its rarely-called slow path into (the
  misnamed) mm/swap.c

- update the locking comment in filemap.c.  pagemap_lru_lock used to
  be one of the outermost locks in the VM locking hierarchy.  Now we
  never take any other locks while holding pagemap_lru_lock, so it no
  longer has any ordering relationship with the other VM locks.

- put_page() now removes pages from the LRU on the final put.  The
  LRU lock is taken interrupt-safely (spin_lock_irqsave), so this is
  safe to do in any context.  (A standalone sketch of this fast/slow
  split follows below.)
parent aaba9265
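For illustration, here is a small standalone C model of the new fast/slow split introduced by this patch: the common-case put is just an atomic decrement, and only the final put falls into a __page_cache_release()-style slow path that pulls the page off the LRU and frees it. This is a userspace sketch using C11 atomics, not kernel code; the struct page, the on_lru flag and the helpers below are simplified stand-ins (the PageReserved() check, pagevecs and the real LRU lists are omitted).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	atomic_int count;	/* reference count, analogous to page->count */
	bool on_lru;		/* stands in for the PG_lru bit */
};

/* Analogue of put_page_testzero(): drop a reference, true on the final put. */
static bool put_page_testzero(struct page *page)
{
	return atomic_fetch_sub(&page->count, 1) == 1;
}

/* Analogue of the rarely-taken slow path that now lives in mm/swap.c:
 * on the final put, take the page off the LRU and free it.
 * (The kernel does the LRU removal under pagemap_lru_lock, irq-safe.) */
static void __page_cache_release(struct page *page)
{
	if (page->on_lru)
		page->on_lru = false;	/* kernel: TestClearPageLRU + list removal */
	free(page);			/* kernel: __free_page() */
}

/* Analogue of the new put_page()/page_cache_release() fast path:
 * the common case is nothing more than an atomic decrement. */
static void put_page(struct page *page)
{
	if (put_page_testzero(page))
		__page_cache_release(page);
}

int main(void)
{
	struct page *page = calloc(1, sizeof(*page));

	atomic_init(&page->count, 2);	/* two references held */
	page->on_lru = true;

	put_page(page);			/* fast path: count 2 -> 1, nothing else happens */
	put_page(page);			/* final put: slow path removes from LRU and frees */
	puts("final put took the slow path");
	return 0;
}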
@@ -194,11 +194,16 @@ struct page {
  * routine so they can be sure the page doesn't go away from under them.
  */
 #define get_page(p)		atomic_inc(&(p)->count)
-#define put_page(p)		__free_page(p)
 #define __put_page(p)		atomic_dec(&(p)->count)
 #define put_page_testzero(p)	atomic_dec_and_test(&(p)->count)
 #define page_count(p)		atomic_read(&(p)->count)
 #define set_page_count(p,v)	atomic_set(&(p)->count, v)
+extern void FASTCALL(__page_cache_release(struct page *));
+#define put_page(p)						\
+	do {							\
+		if (put_page_testzero(p))			\
+			__page_cache_release(p);		\
+	} while (0)
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
...
@@ -23,14 +23,18 @@
 #define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
 
 #define page_cache_get(x)	get_page(x)
-extern void FASTCALL(page_cache_release(struct page *));
+
+static inline void page_cache_release(struct page *page)
+{
+	if (!PageReserved(page) && put_page_testzero(page))
+		__page_cache_release(page);
+}
 
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return alloc_pages(x->gfp_mask, 0);
 }
 
 typedef int filler_t(void *, struct page *);
 
 extern struct page * find_get_page(struct address_space *mapping,
...
@@ -156,8 +156,6 @@ extern int FASTCALL(page_over_rsslimit(struct page *));
 
 /* linux/mm/swap.c */
 extern void FASTCALL(lru_cache_add(struct page *));
-extern void FASTCALL(__lru_cache_del(struct page *));
-extern void FASTCALL(lru_cache_del(struct page *));
 extern void FASTCALL(activate_page(struct page *));
...
@@ -93,7 +93,7 @@ EXPORT_SYMBOL(__alloc_pages);
 EXPORT_SYMBOL(alloc_pages_node);
 EXPORT_SYMBOL(__get_free_pages);
 EXPORT_SYMBOL(get_zeroed_page);
-EXPORT_SYMBOL(page_cache_release);
+EXPORT_SYMBOL(__page_cache_release);
 EXPORT_SYMBOL(__free_pages);
 EXPORT_SYMBOL(free_pages);
 EXPORT_SYMBOL(num_physpages);
...
@@ -53,7 +53,6 @@
 /*
  * Lock ordering:
  *
- *  pagemap_lru_lock
  *  ->i_shared_lock		(vmtruncate)
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  *      ->swap_list_lock
...
@@ -90,6 +90,7 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 	KERNEL_STAT_ADD(pgfree, 1<<order);
 
+	BUG_ON(PageLRU(page));
 	BUG_ON(PagePrivate(page));
 	BUG_ON(page->mapping != NULL);
 	BUG_ON(PageLocked(page));
@@ -450,15 +451,6 @@ unsigned long get_zeroed_page(unsigned int gfp_mask)
 	return 0;
 }
 
-void page_cache_release(struct page *page)
-{
-	if (!PageReserved(page) && put_page_testzero(page)) {
-		if (PageLRU(page))
-			lru_cache_del(page);
-		__free_pages_ok(page, 0);
-	}
-}
-
 void __pagevec_free(struct pagevec *pvec)
 {
 	int i = pagevec_count(pvec);
...
@@ -60,32 +60,25 @@ void lru_cache_add(struct page * page)
 	}
 }
 
-/**
- * __lru_cache_del: remove a page from the page lists
- * @page: the page to add
- *
- * This function is for when the caller already holds
- * the pagemap_lru_lock.
+/*
+ * This path almost never happens - pages are normally freed via pagevecs.
  */
-void __lru_cache_del(struct page * page)
+void __page_cache_release(struct page *page)
 {
-	if (TestClearPageLRU(page)) {
+	BUG_ON(page_count(page) != 0);
+	if (PageLRU(page)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&_pagemap_lru_lock, flags);
+		if (!TestClearPageLRU(page))
+			BUG();
 		if (PageActive(page))
 			del_page_from_active_list(page);
 		else
 			del_page_from_inactive_list(page);
+		spin_unlock_irqrestore(&_pagemap_lru_lock, flags);
 	}
-}
-
-/**
- * lru_cache_del: remove a page from the page lists
- * @page: the page to remove
- */
-void lru_cache_del(struct page * page)
-{
-	spin_lock_irq(&_pagemap_lru_lock);
-	__lru_cache_del(page);
-	spin_unlock_irq(&_pagemap_lru_lock);
+	__free_page(page);
 }
 
 /*
...
@@ -165,6 +165,12 @@ shrink_list(struct list_head *page_list, int nr_pages, zone_t *classzone,
 		pte_chain_unlock(page);
 		mapping = page->mapping;
 
+		/*
+		 * FIXME: this is CPU-inefficient for shared mappings.
+		 * try_to_unmap() will set the page dirty and ->vm_writeback
+		 * will write it.  So we're back to page-at-a-time writepage
+		 * in LRU order.
+		 */
 		if (PageDirty(page) && is_page_cache_freeable(page) &&
 				mapping && may_enter_fs) {
 			int (*writeback)(struct page *, int *);
...