Commit 62e52945 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] stop using page.list in the page allocator

Switch the page allocator over to using page.lru for the buddy lists.
parent 02979dcb
...@@ -199,13 +199,13 @@ static inline void __free_pages_bulk (struct page *page, struct page *base, ...@@ -199,13 +199,13 @@ static inline void __free_pages_bulk (struct page *page, struct page *base,
buddy2 = base + page_idx; buddy2 = base + page_idx;
BUG_ON(bad_range(zone, buddy1)); BUG_ON(bad_range(zone, buddy1));
BUG_ON(bad_range(zone, buddy2)); BUG_ON(bad_range(zone, buddy2));
list_del(&buddy1->list); list_del(&buddy1->lru);
mask <<= 1; mask <<= 1;
area++; area++;
index >>= 1; index >>= 1;
page_idx &= mask; page_idx &= mask;
} }
list_add(&(base + page_idx)->list, &area->free_list); list_add(&(base + page_idx)->lru, &area->free_list);
} }
static inline void free_pages_check(const char *function, struct page *page) static inline void free_pages_check(const char *function, struct page *page)
...@@ -253,9 +253,9 @@ free_pages_bulk(struct zone *zone, int count, ...@@ -253,9 +253,9 @@ free_pages_bulk(struct zone *zone, int count,
zone->all_unreclaimable = 0; zone->all_unreclaimable = 0;
zone->pages_scanned = 0; zone->pages_scanned = 0;
while (!list_empty(list) && count--) { while (!list_empty(list) && count--) {
page = list_entry(list->prev, struct page, list); page = list_entry(list->prev, struct page, lru);
/* have to delete it as __free_pages_bulk list manipulates */ /* have to delete it as __free_pages_bulk list manipulates */
list_del(&page->list); list_del(&page->lru);
__free_pages_bulk(page, base, zone, area, mask, order); __free_pages_bulk(page, base, zone, area, mask, order);
ret++; ret++;
} }
...@@ -271,7 +271,7 @@ void __free_pages_ok(struct page *page, unsigned int order) ...@@ -271,7 +271,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
mod_page_state(pgfree, 1 << order); mod_page_state(pgfree, 1 << order);
for (i = 0 ; i < (1 << order) ; ++i) for (i = 0 ; i < (1 << order) ; ++i)
free_pages_check(__FUNCTION__, page + i); free_pages_check(__FUNCTION__, page + i);
list_add(&page->list, &list); list_add(&page->lru, &list);
kernel_map_pages(page, 1<<order, 0); kernel_map_pages(page, 1<<order, 0);
free_pages_bulk(page_zone(page), 1, &list, order); free_pages_bulk(page_zone(page), 1, &list, order);
} }
...@@ -290,7 +290,7 @@ expand(struct zone *zone, struct page *page, ...@@ -290,7 +290,7 @@ expand(struct zone *zone, struct page *page,
area--; area--;
high--; high--;
size >>= 1; size >>= 1;
list_add(&page->list, &area->free_list); list_add(&page->lru, &area->free_list);
MARK_USED(index, high, area); MARK_USED(index, high, area);
index += size; index += size;
page += size; page += size;
...@@ -353,8 +353,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order) ...@@ -353,8 +353,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
if (list_empty(&area->free_list)) if (list_empty(&area->free_list))
continue; continue;
page = list_entry(area->free_list.next, struct page, list); page = list_entry(area->free_list.next, struct page, lru);
list_del(&page->list); list_del(&page->lru);
index = page - zone->zone_mem_map; index = page - zone->zone_mem_map;
if (current_order != MAX_ORDER-1) if (current_order != MAX_ORDER-1)
MARK_USED(index, current_order, area); MARK_USED(index, current_order, area);
...@@ -384,7 +384,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, ...@@ -384,7 +384,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
if (page == NULL) if (page == NULL)
break; break;
allocated++; allocated++;
list_add_tail(&page->list, list); list_add_tail(&page->lru, list);
} }
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
return allocated; return allocated;
...@@ -426,7 +426,7 @@ int is_head_of_free_region(struct page *page) ...@@ -426,7 +426,7 @@ int is_head_of_free_region(struct page *page)
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
for (order = MAX_ORDER - 1; order >= 0; --order) for (order = MAX_ORDER - 1; order >= 0; --order)
list_for_each(curr, &zone->free_area[order].free_list) list_for_each(curr, &zone->free_area[order].free_list)
if (page == list_entry(curr, struct page, list)) { if (page == list_entry(curr, struct page, lru)) {
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
return 1 << order; return 1 << order;
} }
...@@ -464,7 +464,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold) ...@@ -464,7 +464,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
local_irq_save(flags); local_irq_save(flags);
if (pcp->count >= pcp->high) if (pcp->count >= pcp->high)
pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0); pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
list_add(&page->list, &pcp->list); list_add(&page->lru, &pcp->list);
pcp->count++; pcp->count++;
local_irq_restore(flags); local_irq_restore(flags);
put_cpu(); put_cpu();
...@@ -500,8 +500,8 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold) ...@@ -500,8 +500,8 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold)
pcp->count += rmqueue_bulk(zone, 0, pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, &pcp->list); pcp->batch, &pcp->list);
if (pcp->count) { if (pcp->count) {
page = list_entry(pcp->list.next, struct page, list); page = list_entry(pcp->list.next, struct page, lru);
list_del(&page->list); list_del(&page->lru);
pcp->count--; pcp->count--;
} }
local_irq_restore(flags); local_irq_restore(flags);
...@@ -1368,7 +1368,7 @@ void __init memmap_init_zone(struct page *start, unsigned long size, int nid, ...@@ -1368,7 +1368,7 @@ void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
set_page_zone(page, NODEZONE(nid, zone)); set_page_zone(page, NODEZONE(nid, zone));
set_page_count(page, 0); set_page_count(page, 0);
SetPageReserved(page); SetPageReserved(page);
INIT_LIST_HEAD(&page->list); INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL #ifdef WANT_PAGE_VIRTUAL
/* The shift won't overflow because ZONE_NORMAL is below 4G. */ /* The shift won't overflow because ZONE_NORMAL is below 4G. */
if (zone != ZONE_HIGHMEM) if (zone != ZONE_HIGHMEM)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment