Commit 62e52945 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] stop using page.list in the page allocator

Switch the page allocator over to using page.lru for the buddy lists.
parent 02979dcb
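For readers new to the idiom this patch converts, the buddy free lists are intrusive lists: a struct list_head embedded in each struct page is linked onto zone->free_area[order].free_list, and list_entry() (container_of) recovers the struct page from that embedded node. The patch moves that linkage from page->list to page->lru, which works because a page sitting on a buddy free list is not on any LRU list at the same time. The sketch below is a minimal userspace model of the idiom, not kernel code: the list helpers are simplified stand-ins for <linux/list.h>, and the struct page and free_area layouts are illustrative toys.

/*
 * Minimal userspace sketch of the intrusive-list idiom used by the
 * buddy allocator after this patch: pages are threaded onto a
 * free_area free list through the list_head embedded at page->lru.
 * The helpers are simplified stand-ins for <linux/list.h>; the
 * struct page and free_area definitions are toys, not the kernel's
 * real layouts.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        entry->next = entry->prev = NULL;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/* list_entry(): recover the containing struct from its embedded node. */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for the structures the patch touches. */
struct page {
        unsigned long pfn;
        struct list_head lru;   /* free-list linkage after this patch */
};

struct free_area {
        struct list_head free_list;
};

int main(void)
{
        struct free_area area = { .free_list = LIST_HEAD_INIT(area.free_list) };
        struct page pages[2] = { { .pfn = 10 }, { .pfn = 11 } };

        /* Freeing side (__free_pages_bulk/expand style): link pages in. */
        INIT_LIST_HEAD(&pages[0].lru);
        INIT_LIST_HEAD(&pages[1].lru);
        list_add(&pages[0].lru, &area.free_list);
        list_add(&pages[1].lru, &area.free_list);

        /* Allocation side (__rmqueue style): pop pages via their lru node. */
        while (!list_empty(&area.free_list)) {
                struct page *page =
                        list_entry(area.free_list.next, struct page, lru);
                list_del(&page->lru);
                printf("allocated pfn %lu\n", page->pfn);
        }
        return 0;
}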
@@ -199,13 +199,13 @@ static inline void __free_pages_bulk (struct page *page, struct page *base,
                 buddy2 = base + page_idx;
                 BUG_ON(bad_range(zone, buddy1));
                 BUG_ON(bad_range(zone, buddy2));
-                list_del(&buddy1->list);
+                list_del(&buddy1->lru);
                 mask <<= 1;
                 area++;
                 index >>= 1;
                 page_idx &= mask;
         }
-        list_add(&(base + page_idx)->list, &area->free_list);
+        list_add(&(base + page_idx)->lru, &area->free_list);
 }
 
 static inline void free_pages_check(const char *function, struct page *page)
@@ -253,9 +253,9 @@ free_pages_bulk(struct zone *zone, int count,
         zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
         while (!list_empty(list) && count--) {
-                page = list_entry(list->prev, struct page, list);
+                page = list_entry(list->prev, struct page, lru);
                 /* have to delete it as __free_pages_bulk list manipulates */
-                list_del(&page->list);
+                list_del(&page->lru);
                 __free_pages_bulk(page, base, zone, area, mask, order);
                 ret++;
         }
@@ -271,7 +271,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
         mod_page_state(pgfree, 1 << order);
         for (i = 0 ; i < (1 << order) ; ++i)
                 free_pages_check(__FUNCTION__, page + i);
-        list_add(&page->list, &list);
+        list_add(&page->lru, &list);
         kernel_map_pages(page, 1<<order, 0);
         free_pages_bulk(page_zone(page), 1, &list, order);
 }
@@ -290,7 +290,7 @@ expand(struct zone *zone, struct page *page,
                 area--;
                 high--;
                 size >>= 1;
-                list_add(&page->list, &area->free_list);
+                list_add(&page->lru, &area->free_list);
                 MARK_USED(index, high, area);
                 index += size;
                 page += size;
@@ -353,8 +353,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                 if (list_empty(&area->free_list))
                         continue;
 
-                page = list_entry(area->free_list.next, struct page, list);
-                list_del(&page->list);
+                page = list_entry(area->free_list.next, struct page, lru);
+                list_del(&page->lru);
                 index = page - zone->zone_mem_map;
                 if (current_order != MAX_ORDER-1)
                         MARK_USED(index, current_order, area);
@@ -384,7 +384,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 if (page == NULL)
                         break;
                 allocated++;
-                list_add_tail(&page->list, list);
+                list_add_tail(&page->lru, list);
         }
         spin_unlock_irqrestore(&zone->lock, flags);
         return allocated;
@@ -426,7 +426,7 @@ int is_head_of_free_region(struct page *page)
         spin_lock_irqsave(&zone->lock, flags);
         for (order = MAX_ORDER - 1; order >= 0; --order)
                 list_for_each(curr, &zone->free_area[order].free_list)
-                        if (page == list_entry(curr, struct page, list)) {
+                        if (page == list_entry(curr, struct page, lru)) {
                                 spin_unlock_irqrestore(&zone->lock, flags);
                                 return 1 << order;
                         }
@@ -464,7 +464,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
         local_irq_save(flags);
         if (pcp->count >= pcp->high)
                 pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
-        list_add(&page->list, &pcp->list);
+        list_add(&page->lru, &pcp->list);
         pcp->count++;
         local_irq_restore(flags);
         put_cpu();
@@ -500,8 +500,8 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold)
                         pcp->count += rmqueue_bulk(zone, 0,
                                                 pcp->batch, &pcp->list);
                 if (pcp->count) {
-                        page = list_entry(pcp->list.next, struct page, list);
-                        list_del(&page->list);
+                        page = list_entry(pcp->list.next, struct page, lru);
+                        list_del(&page->lru);
                         pcp->count--;
                 }
                 local_irq_restore(flags);
@@ -1368,7 +1368,7 @@ void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
                 set_page_zone(page, NODEZONE(nid, zone));
                 set_page_count(page, 0);
                 SetPageReserved(page);
-                INIT_LIST_HEAD(&page->list);
+                INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
                 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
                 if (zone != ZONE_HIGHMEM)