Commit 689bcebf authored by Hugh Dickins; committed by Linus Torvalds

[PATCH] unpaged: PG_reserved bad_page

It used to be the case that PG_reserved pages were silently never freed, but
in 2.6.15-rc1 they may be freed with a "Bad page state" message.  We should
work through such cases as they appear, fixing the code; but for now it's
safer to issue the message without freeing the page, leaving PG_reserved set.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f57e88a8
...@@ -140,8 +140,7 @@ static void bad_page(const char *function, struct page *page) ...@@ -140,8 +140,7 @@ static void bad_page(const char *function, struct page *page)
1 << PG_reclaim | 1 << PG_reclaim |
1 << PG_slab | 1 << PG_slab |
1 << PG_swapcache | 1 << PG_swapcache |
1 << PG_writeback | 1 << PG_writeback );
1 << PG_reserved );
set_page_count(page, 0); set_page_count(page, 0);
reset_page_mapcount(page); reset_page_mapcount(page);
page->mapping = NULL; page->mapping = NULL;
...@@ -335,7 +334,7 @@ static inline void __free_pages_bulk (struct page *page, ...@@ -335,7 +334,7 @@ static inline void __free_pages_bulk (struct page *page,
zone->free_area[order].nr_free++; zone->free_area[order].nr_free++;
} }
static inline void free_pages_check(const char *function, struct page *page) static inline int free_pages_check(const char *function, struct page *page)
{ {
if ( page_mapcount(page) || if ( page_mapcount(page) ||
page->mapping != NULL || page->mapping != NULL ||
...@@ -353,6 +352,12 @@ static inline void free_pages_check(const char *function, struct page *page) ...@@ -353,6 +352,12 @@ static inline void free_pages_check(const char *function, struct page *page)
bad_page(function, page); bad_page(function, page);
if (PageDirty(page)) if (PageDirty(page))
__ClearPageDirty(page); __ClearPageDirty(page);
/*
* For now, we report if PG_reserved was found set, but do not
* clear it, and do not free the page. But we shall soon need
* to do more, for when the ZERO_PAGE count wraps negative.
*/
return PageReserved(page);
} }
/* /*
...@@ -392,11 +397,10 @@ void __free_pages_ok(struct page *page, unsigned int order) ...@@ -392,11 +397,10 @@ void __free_pages_ok(struct page *page, unsigned int order)
{ {
LIST_HEAD(list); LIST_HEAD(list);
int i; int i;
int reserved = 0;
arch_free_page(page, order); arch_free_page(page, order);
mod_page_state(pgfree, 1 << order);
#ifndef CONFIG_MMU #ifndef CONFIG_MMU
if (order > 0) if (order > 0)
for (i = 1 ; i < (1 << order) ; ++i) for (i = 1 ; i < (1 << order) ; ++i)
...@@ -404,8 +408,12 @@ void __free_pages_ok(struct page *page, unsigned int order) ...@@ -404,8 +408,12 @@ void __free_pages_ok(struct page *page, unsigned int order)
#endif #endif
for (i = 0 ; i < (1 << order) ; ++i) for (i = 0 ; i < (1 << order) ; ++i)
free_pages_check(__FUNCTION__, page + i); reserved += free_pages_check(__FUNCTION__, page + i);
if (reserved)
return;
list_add(&page->lru, &list); list_add(&page->lru, &list);
mod_page_state(pgfree, 1 << order);
kernel_map_pages(page, 1<<order, 0); kernel_map_pages(page, 1<<order, 0);
free_pages_bulk(page_zone(page), 1, &list, order); free_pages_bulk(page_zone(page), 1, &list, order);
} }
...@@ -463,7 +471,7 @@ void set_page_refs(struct page *page, int order) ...@@ -463,7 +471,7 @@ void set_page_refs(struct page *page, int order)
/* /*
* This page is about to be returned from the page allocator * This page is about to be returned from the page allocator
*/ */
static void prep_new_page(struct page *page, int order) static int prep_new_page(struct page *page, int order)
{ {
if ( page_mapcount(page) || if ( page_mapcount(page) ||
page->mapping != NULL || page->mapping != NULL ||
...@@ -481,12 +489,20 @@ static void prep_new_page(struct page *page, int order) ...@@ -481,12 +489,20 @@ static void prep_new_page(struct page *page, int order)
1 << PG_reserved ))) 1 << PG_reserved )))
bad_page(__FUNCTION__, page); bad_page(__FUNCTION__, page);
/*
* For now, we report if PG_reserved was found set, but do not
* clear it, and do not allocate the page: as a safety net.
*/
if (PageReserved(page))
return 1;
page->flags &= ~(1 << PG_uptodate | 1 << PG_error | page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_referenced | 1 << PG_arch_1 |
1 << PG_checked | 1 << PG_mappedtodisk); 1 << PG_checked | 1 << PG_mappedtodisk);
set_page_private(page, 0); set_page_private(page, 0);
set_page_refs(page, order); set_page_refs(page, order);
kernel_map_pages(page, 1 << order, 1); kernel_map_pages(page, 1 << order, 1);
return 0;
} }
/* /*
...@@ -669,11 +685,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) ...@@ -669,11 +685,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
arch_free_page(page, 0); arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
inc_page_state(pgfree);
if (PageAnon(page)) if (PageAnon(page))
page->mapping = NULL; page->mapping = NULL;
free_pages_check(__FUNCTION__, page); if (free_pages_check(__FUNCTION__, page))
return;
inc_page_state(pgfree);
kernel_map_pages(page, 1, 0);
pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
local_irq_save(flags); local_irq_save(flags);
list_add(&page->lru, &pcp->list); list_add(&page->lru, &pcp->list);
...@@ -712,12 +731,14 @@ static struct page * ...@@ -712,12 +731,14 @@ static struct page *
buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
{ {
unsigned long flags; unsigned long flags;
struct page *page = NULL; struct page *page;
int cold = !!(gfp_flags & __GFP_COLD); int cold = !!(gfp_flags & __GFP_COLD);
again:
if (order == 0) { if (order == 0) {
struct per_cpu_pages *pcp; struct per_cpu_pages *pcp;
page = NULL;
pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
local_irq_save(flags); local_irq_save(flags);
if (pcp->count <= pcp->low) if (pcp->count <= pcp->low)
...@@ -739,7 +760,8 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) ...@@ -739,7 +760,8 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
if (page != NULL) { if (page != NULL) {
BUG_ON(bad_range(zone, page)); BUG_ON(bad_range(zone, page));
mod_page_state_zone(zone, pgalloc, 1 << order); mod_page_state_zone(zone, pgalloc, 1 << order);
prep_new_page(page, order); if (prep_new_page(page, order))
goto again;
if (gfp_flags & __GFP_ZERO) if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags); prep_zero_page(page, order, gfp_flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment