Commit c1ab3459 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] show_free_areas() cleanup

Cleanup to show_free_areas() from Bill Irwin:

show_free_areas() and show_free_areas_core() are a mess.
(1) it uses a bizarre and ugly form of list iteration to walk buddy lists
        use standard list functions instead
(2) it prints the same information repeatedly once per-node
        rationalize the braindamaged iteration logic
(3) show_free_areas_node() is useless and not called anywhere
        remove it entirely
(4) show_free_areas() itself just calls show_free_areas_core()
        remove show_free_areas_core() and do the stuff directly
(5) SWAP_CACHE_INFO is always #defined, remove it
(6) INC_CACHE_INFO() doesn't use the do { } while (0) construct

This patch also includes Matthew Dobson's patch which removes
mm/numa.c:node_lock.  The consensus is that it doesn't do anything now
that show_free_areas_node() isn't there.
parent cbb6e8ec
......@@ -327,7 +327,6 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num)
extern struct page *mem_map;
extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);
extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
......
......@@ -176,10 +176,7 @@ int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page);
/* linux/mm/page_alloc.c */
/* linux/mm/swap_state.c */
#define SWAP_CACHE_INFO
#ifdef SWAP_CACHE_INFO
extern void show_swap_cache_info(void);
#endif
extern int add_to_swap_cache(struct page *, swp_entry_t);
extern int add_to_swap(struct page *);
extern void __delete_from_swap_cache(struct page *page);
......
......@@ -44,17 +44,6 @@ struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int orde
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
/* Serializes concurrent callers of show_free_areas_node() so per-node
 * output lines are not interleaved.  NOTE(review): per the commit message
 * above, this lock exists only for this function and is removed with it. */
static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
/*
 * show_free_areas_node - dump free-memory statistics for a single node.
 * @pgdat: node whose zones/free lists are reported.
 *
 * Thin wrapper: takes node_lock with IRQs disabled, then delegates all
 * printing to show_free_areas_core() (defined elsewhere in this file;
 * presumably prints per-zone free-page counts -- confirm against its body).
 */
void show_free_areas_node(pg_data_t *pgdat)
{
unsigned long flags;
/* irqsave variant: safe even if a caller holds IRQs disabled already. */
spin_lock_irqsave(&node_lock, flags);
show_free_areas_core(pgdat);
spin_unlock_irqrestore(&node_lock, flags);
}
/*
 * Nodes can be initialized in parallel, in no particular order.
 */
......@@ -106,11 +95,10 @@ struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order)
#ifdef CONFIG_NUMA
temp = NODE_DATA(numa_node_id());
#else
spin_lock_irqsave(&node_lock, flags);
if (!next) next = pgdat_list;
if (!next)
next = pgdat_list;
temp = next;
next = next->node_next;
spin_unlock_irqrestore(&node_lock, flags);
#endif
start = temp;
while (temp) {
......
......@@ -601,12 +601,11 @@ void si_meminfo(struct sysinfo *val)
* We also calculate the percentage fragmentation. We do this by counting the
* memory on each free list with the exception of the first item on the list.
*/
void show_free_areas_core(pg_data_t *pgdat)
void show_free_areas(void)
{
unsigned int order;
unsigned type;
pg_data_t *tmpdat = pgdat;
pg_data_t *pgdat;
struct page_state ps;
int type;
get_page_state(&ps);
......@@ -614,20 +613,20 @@ void show_free_areas_core(pg_data_t *pgdat)
K(nr_free_pages()),
K(nr_free_highpages()));
while (tmpdat) {
zone_t *zone;
for (zone = tmpdat->node_zones;
zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
"high:%6lukB\n",
zone->name,
K(zone->free_pages),
K(zone->pages_min),
K(zone->pages_low),
K(zone->pages_high));
tmpdat = tmpdat->node_next;
}
for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
for (type = 0; type < MAX_NR_ZONES; ++type) {
zone_t *zone = &pgdat->node_zones[type];
printk("Zone:%s "
"freepages:%6lukB "
"min:%6lukB "
"low:%6lukB "
"high:%6lukB\n",
zone->name,
K(zone->free_pages),
K(zone->pages_min),
K(zone->pages_low),
K(zone->pages_high));
}
printk("( Active:%lu inactive:%lu dirty:%lu writeback:%lu free:%u )\n",
ps.nr_active,
......@@ -636,40 +635,28 @@ void show_free_areas_core(pg_data_t *pgdat)
ps.nr_writeback,
nr_free_pages());
for (type = 0; type < MAX_NR_ZONES; type++) {
struct list_head *head, *curr;
zone_t *zone = pgdat->node_zones + type;
unsigned long nr, total, flags;
for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
for (type = 0; type < MAX_NR_ZONES; type++) {
list_t *elem;
zone_t *zone = &pgdat->node_zones[type];
unsigned long nr, flags, order, total = 0;
if (!zone->size)
continue;
total = 0;
if (zone->size) {
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
head = &(zone->free_area + order)->free_list;
curr = head;
for (order = 0; order < MAX_ORDER; order++) {
nr = 0;
for (;;) {
curr = curr->next;
if (curr == head)
break;
nr++;
}
total += nr * (1 << order);
list_for_each(elem, &zone->free_area[order].free_list)
++nr;
total += nr << order;
printk("%lu*%lukB ", nr, K(1UL) << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
printk("= %lukB)\n", K(total));
}
printk("= %lukB)\n", K(total));
}
#ifdef SWAP_CACHE_INFO
show_swap_cache_info();
#endif
}
void show_free_areas(void)
{
show_free_areas_core(pgdat_list);
}
/*
......
......@@ -42,8 +42,7 @@ struct address_space swapper_space = {
private_list: LIST_HEAD_INIT(swapper_space.private_list),
};
#ifdef SWAP_CACHE_INFO
#define INC_CACHE_INFO(x) (swap_cache_info.x++)
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
static struct {
unsigned long add_total;
......@@ -61,9 +60,6 @@ void show_swap_cache_info(void)
swap_cache_info.find_success, swap_cache_info.find_total,
swap_cache_info.noent_race, swap_cache_info.exist_race);
}
#else
#define INC_CACHE_INFO(x) do { } while (0)
#endif
int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment