Commit 63a07153 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] debug: check page refcount in __free_pages_ok()

Add a BUG() check to __free_pages_ok() to catch someone freeing a
page which has a non-zero refcount.  Actually, this check is mainly
to catch someone (ie: shrink_cache()) incrementing a page's refcount
shortly after it has been freed.
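
As an illustration (not part of the patch), here is a minimal
userspace sketch of the failure mode the new check traps.  struct
page, page_count() and the BUG()/BUG_ON() macros below are simplified
stand-ins for the kernel's:

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy versions of the kernel's BUG()/BUG_ON() macros. */
	#define BUG()		do { fprintf(stderr, "kernel BUG at %s:%d!\n", \
					__FILE__, __LINE__); abort(); } while (0)
	#define BUG_ON(cond)	do { if (cond) BUG(); } while (0)

	struct page { int count; };	/* toy page: just a refcount */

	static int page_count(struct page *page)
	{
		return page->count;
	}

	/* Sketch of the freeing-time sanity check this patch adds. */
	static void free_pages_checked(struct page *page)
	{
		BUG_ON(page_count(page) != 0);	/* page still has a user */
		/* ... hand the page back to the buddy allocator ... */
	}

	int main(void)
	{
		struct page page = { .count = 1 };	/* straggling reference */

		free_pages_checked(&page);	/* trips the check and aborts */
		return 0;
	}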

Also clean up __free_pages_ok() a bit and convert lots of open-coded
BUG() calls to BUG_ON().
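
The BUG_ON() conversion is mechanical, since BUG_ON(cond) is simply a
wrapper which calls BUG() when the condition holds; each open-coded
pair collapses to one line:

	/* before */
	if (PageLocked(page))
		BUG();

	/* after */
	BUG_ON(PageLocked(page));

The cleanup likewise replaces the local_freelist/back_local_freelist
goto pair with a single unlikely() branch at the top of the function,
which falls through to an out: label in front of the return.
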
parent ea66b69c
@@ -86,23 +86,24 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 	struct page *base;
 	zone_t *zone;
 
-	if (PagePrivate(page))
-		BUG();
-	if (page->mapping)
-		BUG();
-	if (PageLocked(page))
-		BUG();
-	if (PageLRU(page))
-		BUG();
-	if (PageActive(page))
-		BUG();
-	if (PageWriteback(page))
-		BUG();
-	ClearPageDirty(page);
-
-	if (current->flags & PF_FREE_PAGES)
-		goto local_freelist;
-back_local_freelist:
+	BUG_ON(PagePrivate(page));
+	BUG_ON(page->mapping != NULL);
+	BUG_ON(PageLocked(page));
+	BUG_ON(PageLRU(page));
+	BUG_ON(PageActive(page));
+	BUG_ON(PageWriteback(page));
+	if (PageDirty(page))
+		ClearPageDirty(page);
+	BUG_ON(page_count(page) != 0);
+
+	if (unlikely(current->flags & PF_FREE_PAGES)) {
+		if (!current->nr_local_pages && !in_interrupt()) {
+			list_add(&page->list, &current->local_pages);
+			page->index = order;
+			current->nr_local_pages++;
+			goto out;
+		}
+	}
 
 	zone = page_zone(page);
@@ -112,18 +113,14 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 	if (page_idx & ~mask)
 		BUG();
 	index = page_idx >> (1 + order);
 
 	area = zone->free_area + order;
-
 	spin_lock_irqsave(&zone->lock, flags);
-
 	zone->free_pages -= mask;
-
 	while (mask + (1 << (MAX_ORDER-1))) {
 		struct page *buddy1, *buddy2;
 
-		if (area >= zone->free_area + MAX_ORDER)
-			BUG();
+		BUG_ON(area >= zone->free_area + MAX_ORDER);
 		if (!__test_and_change_bit(index, area->map))
 			/*
 			 * the buddy page is still allocated.
...@@ -136,11 +133,8 @@ static void __free_pages_ok (struct page *page, unsigned int order) ...@@ -136,11 +133,8 @@ static void __free_pages_ok (struct page *page, unsigned int order)
*/ */
buddy1 = base + (page_idx ^ -mask); buddy1 = base + (page_idx ^ -mask);
buddy2 = base + page_idx; buddy2 = base + page_idx;
if (bad_range(zone, buddy1)) BUG_ON(bad_range(zone, buddy1));
BUG(); BUG_ON(bad_range(zone, buddy2));
if (bad_range(zone, buddy2))
BUG();
list_del(&buddy1->list); list_del(&buddy1->list);
mask <<= 1; mask <<= 1;
area++; area++;
@@ -148,19 +142,9 @@ static void __free_pages_ok (struct page *page, unsigned int order)
 		page_idx &= mask;
 	}
 	list_add(&(base + page_idx)->list, &area->free_list);
 	spin_unlock_irqrestore(&zone->lock, flags);
-
+out:
 	return;
-
-local_freelist:
-	if (current->nr_local_pages)
-		goto back_local_freelist;
-	if (in_interrupt())
-		goto back_local_freelist;
-
-	list_add(&page->list, &current->local_pages);
-	page->index = order;
-	current->nr_local_pages++;
 }
 
 #define MARK_USED(index, order, area) \
@@ -172,8 +156,7 @@ static inline struct page * expand (zone_t *zone, struct page *page,
 	unsigned long size = 1 << high;
 
 	while (high > low) {
-		if (bad_range(zone, page))
-			BUG();
+		BUG_ON(bad_range(zone, page));
 		area--;
 		high--;
 		size >>= 1;
@@ -182,8 +165,7 @@ static inline struct page * expand (zone_t *zone, struct page *page,
 		index += size;
 		page += size;
 	}
-	if (bad_range(zone, page))
-		BUG();
+	BUG_ON(bad_range(zone, page));
 	return page;
 }
@@ -223,8 +205,7 @@ static struct page * rmqueue(zone_t *zone, unsigned int order)
 			unsigned int index;
 
 			page = list_entry(curr, struct page, list);
-			if (bad_range(zone, page))
-				BUG();
+			BUG_ON(bad_range(zone, page));
 			list_del(curr);
 			index = page - zone->zone_mem_map;
 			if (curr_order != MAX_ORDER-1)
@@ -279,14 +260,14 @@ struct page *_alloc_pages(unsigned int gfp_mask, unsigned int order)
 }
 #endif
 
-static struct page * FASTCALL(balance_classzone(zone_t *, unsigned int, unsigned int, int *));
-static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed)
+static /* inline */ struct page *
+balance_classzone(zone_t * classzone, unsigned int gfp_mask,
+			unsigned int order, int * freed)
 {
 	struct page * page = NULL;
 	int __freed = 0;
 
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 
 	current->allocation_order = order;
 	current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
@@ -793,8 +774,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	unsigned long totalpages, offset, realtotalpages;
 	const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
 
-	if (zone_start_paddr & ~PAGE_MASK)
-		BUG();
+	BUG_ON(zone_start_paddr & ~PAGE_MASK);
 
 	totalpages = 0;
 	for (i = 0; i < MAX_NR_ZONES; i++) {