Commit e07316f9 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] free_area_init cleanup

Patch from Martin Bligh.  It should only affect machines using
discontigmem.

"This patch cleans up free_area_init stuff, and undefines mem_map and
max_mapnr for discontigmem, where they were horrible kludges anyway
...  We just use the lmem_maps instead, which makes much more sense.
It also kills pgdat->node_start_mapnr, which is tarred with the same
brush.

It breaks free_area_init_core into a couple of sections, pulls the
allocation of the lmem_map back into the next higher function, and
passes more things via the pgdat.  But that's not very interesting,
the objective was to kill mem_map for discontigmem, which seems to
attract bugs like flypaper.  This brings any misuses to obvious
compile-time errors rather than weird oopses, which I can't help but
feel is a good thing.

It does break other discontigmem architectures, but in a very obvious
way (they won't compile) and it's easy to fix.  I think that's a small
price to pay ...  ;-) At some point soon I will follow up with a patch
to remove free_area_init_node for the contig mem case, or at the very
least rename it to something more sensible, like __free_area_init.

Christoph has grander plans to kill mem_map more extensively in
addition to the attached, but I've heard nobody disagree that it
should die for the discontigmem case at least.

Oh, and I renamed mem_map in drivers/pcmcia/sa1100 to pc_mem_map
because my tiny little brain (and cscope) find it confusing like that.

Tested on 16-way NUMA-Q with discontigmem + NUMA support and on a
standard PC (well, boots and appears functional).  On top of
2.5.33-mm4"
parent 967e6864
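For orientation before the hunks: the per-node initialization flow that the mm/page_alloc.c changes near the bottom of this diff converge on looks roughly like the sketch below. This is a condensed sketch, not the verbatim patch; the valid_addr_bitmap allocation that follows in the real discontigmem function is omitted.

void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	unsigned long size;

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;

	/* 1: size the node first, so the lmem_map size is known */
	calculate_totalpages(pgdat, zones_size, zholes_size);

	/* 2: the lmem_map is now allocated here, in the caller, instead of
	 *    inside free_area_init_core() */
	if (pmap == NULL) {
		size = (pgdat->node_size + 1) * sizeof(struct page);
		pmap = (struct page *) alloc_bootmem_node(pgdat, size);
	}
	pgdat->node_mem_map = pmap;

	/* 3: the core takes everything via the pgdat and never touches a
	 *    global mem_map */
	free_area_init_core(pgdat, zones_size, zholes_size);
}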
@@ -286,7 +286,6 @@ void __init paging_init(void)
for (nid = 0; nid < numnodes; nid++) {
unsigned long start_pfn = plat_node_bdata[nid].node_boot_start >> PAGE_SHIFT;
unsigned long end_pfn = plat_node_bdata[nid].node_low_pfn;
unsigned long lmax_mapnr;
if (dma_local_pfn >= end_pfn - start_pfn)
zones_size[ZONE_DMA] = end_pfn - start_pfn;
@@ -295,11 +294,6 @@ void __init paging_init(void)
zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
}
free_area_init_node(nid, NODE_DATA(nid), NULL, zones_size, start_pfn, NULL);
lmax_mapnr = PLAT_NODE_DATA_STARTNR(nid) + PLAT_NODE_DATA_SIZE(nid);
if (lmax_mapnr > max_mapnr) {
max_mapnr = lmax_mapnr;
DBGDCONT("Grow max_mapnr to %ld\n", max_mapnr);
}
}
/* Initialize the kernel's ZERO_PGE. */
......
@@ -25,7 +25,7 @@ __asm__(".align 4\nvide: ret");
static void __init init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
int mbytes = max_mapnr >> (20-PAGE_SHIFT);
int mbytes = num_physpages >> (20-PAGE_SHIFT);
int r;
/*
......
@@ -58,7 +58,11 @@ EXPORT_SYMBOL(boot_cpu_data);
EXPORT_SYMBOL(EISA_bus);
#endif
EXPORT_SYMBOL(MCA_bus);
#ifdef CONFIG_MULTIQUAD
#ifdef CONFIG_DISCONTIGMEM
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfn_to_nid);
#endif
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif
EXPORT_SYMBOL(__verify_write);
......
@@ -82,27 +82,19 @@ static void __init smp_dump_qct(void)
*/
int physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
#define MB_TO_ELEMENT(x) (x >> ELEMENT_REPRESENTS)
#define PA_TO_MB(pa) (pa >> 20) /* assumption: a physical address is in bytes */
#define PFN_TO_ELEMENT(pfn) (pfn / PAGES_PER_ELEMENT)
#define PA_TO_ELEMENT(pa) (PFN_TO_ELEMENT(pa >> PAGE_SHIFT))
int pa_to_nid(u64 pa)
int pfn_to_nid(unsigned long pfn)
{
int nid;
nid = physnode_map[MB_TO_ELEMENT(PA_TO_MB(pa))];
int nid = physnode_map[PFN_TO_ELEMENT(pfn)];
/* the physical address passed in is not in the map for the system */
if (nid == -1)
BUG();
BUG(); /* address is not present */
return nid;
}
int pfn_to_nid(unsigned long pfn)
{
return pa_to_nid(((u64)pfn) << PAGE_SHIFT);
}
/*
* for each node mark the regions
* TOPOFMEM = hi_shrd_mem_start + hi_shrd_mem_size
@@ -132,7 +124,7 @@ static void __init initialize_physnode_map(void)
topofmem = eq->hi_shrd_mem_start + eq->hi_shrd_mem_size;
while (cur < topofmem) {
physnode_map[cur >> 8] = nid;
cur += (ELEMENT_REPRESENTS - 1);
cur ++;
}
}
}
......
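As a toy illustration of the new node lookup, the following small userspace program models pfn_to_nid() with the constants from the include/asm-i386/numaq.h hunk further down. The node layout in main() is made up for the example; in the kernel, initialize_physnode_map() fills physnode_map from the quad configuration table. (The [0 ... N] range initializer is the same GNU C extension the kernel source uses.)

#include <stdio.h>

/* 64GB of 4K pages, tracked in 256 elements of 256MB each */
#define MAX_NR_PAGES      16777216
#define MAX_ELEMENTS      256
#define PAGES_PER_ELEMENT (MAX_NR_PAGES / MAX_ELEMENTS)	/* 65536 pages */

static int physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1 };

static int pfn_to_nid(unsigned long pfn)
{
	return physnode_map[pfn / PAGES_PER_ELEMENT];
}

int main(void)
{
	/* made-up layout: node 0 owns the first 512MB, node 1 the next 512MB */
	physnode_map[0] = physnode_map[1] = 0;
	physnode_map[2] = physnode_map[3] = 1;

	/* pfn 0x12345 is ~291MB into memory: element 1, hence node 0 */
	printf("pfn 0x12345 -> element %lu -> node %d\n",
	       0x12345UL / PAGES_PER_ELEMENT, pfn_to_nid(0x12345UL));
	return 0;
}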
@@ -275,20 +275,9 @@ void __init set_highmem_pages_init(int bad_ppro)
void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long lmax_mapnr;
int nid;
highmem_start_page = mem_map + NODE_DATA(0)->node_zones[ZONE_HIGHMEM].zone_start_mapnr;
highmem_start_page = NODE_DATA(0)->node_zones[ZONE_HIGHMEM].zone_mem_map;
num_physpages = highend_pfn;
for (nid = 0; nid < numnodes; nid++) {
lmax_mapnr = node_startnr(nid) + node_size(nid);
if (lmax_mapnr > max_mapnr) {
max_mapnr = lmax_mapnr;
}
}
#else
max_mapnr = num_physpages = max_low_pfn;
num_physpages = max_low_pfn;
#endif
}
@@ -440,8 +440,10 @@ void __init mem_init(void)
int tmp;
int bad_ppro;
#ifndef CONFIG_DISCONTIGMEM
if (!mem_map)
BUG();
#endif
bad_ppro = ppro_with_ram_bug();
@@ -471,7 +473,7 @@ void __init mem_init(void)
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
......
@@ -22,26 +22,29 @@
void show_mem(void)
{
int pfn, total = 0, reserved = 0;
int total = 0, reserved = 0;
int shared = 0, cached = 0;
int highmem = 0;
struct page *page;
pg_data_t *pgdat;
unsigned long i;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
pfn = max_mapnr;
while (pfn-- > 0) {
page = pfn_to_page(pfn);
total++;
if (PageHighMem(page))
highmem++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (page_count(page))
shared += page_count(page) - 1;
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_size; ++i) {
page = pgdat->node_mem_map + i;
total++;
if (PageHighMem(page))
highmem++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (page_count(page))
shared += page_count(page) - 1;
}
}
printk("%d pages of RAM\n", total);
printk("%d pages of HIGHMEM\n",highmem);
......
@@ -254,10 +254,6 @@ void __init paging_init(void)
zones_size[ZONE_DMA] = end_pfn + 1 - start_pfn;
free_area_init_node(node, NODE_DATA(node), 0, zones_size,
start_pfn, 0);
if ((PLAT_NODE_DATA_STARTNR(node) +
PLAT_NODE_DATA_SIZE(node)) > pagenr)
pagenr = PLAT_NODE_DATA_STARTNR(node) +
PLAT_NODE_DATA_SIZE(node);
}
}
@@ -271,7 +267,6 @@ void __init mem_init(void)
unsigned long codesize, datasize, initsize;
int slot, numslots;
struct page *pg, *pslot;
pfn_t pgnr;
num_physpages = numpages; /* memory already sized by szmem */
max_mapnr = pagenr; /* already found during paging_init */
@@ -293,7 +288,6 @@ void __init mem_init(void)
* We need to manually do the other slots.
*/
pg = NODE_DATA(nid)->node_mem_map + slot_getsize(nid, 0);
pgnr = PLAT_NODE_DATA_STARTNR(nid) + slot_getsize(nid, 0);
numslots = node_getlastslot(nid);
for (slot = 1; slot <= numslots; slot++) {
pslot = NODE_DATA(nid)->node_mem_map +
@@ -304,7 +298,7 @@ void __init mem_init(void)
* free up the pages that hold the memmap entries.
*/
while (pg < pslot) {
pg++; pgnr++;
pg++;
}
/*
@@ -312,8 +306,8 @@
*/
pslot += slot_getsize(nid, slot);
while (pg < pslot) {
if (!page_is_ram(pgnr))
continue;
/* if (!page_is_ram(pgnr)) continue; */
/* commented out until page_is_ram works */
ClearPageReserved(pg);
atomic_set(&pg->count, 1);
__free_page(pg);
......
@@ -160,7 +160,7 @@ struct sa1100_pcmcia_socket {
*/
socket_state_t cs_state;
pccard_io_map io_map[MAX_IO_WIN];
pccard_mem_map mem_map[MAX_WIN];
pccard_mem_map pc_mem_map[MAX_WIN];
void (*handler)(void *, unsigned int);
void *handler_info;
......
@@ -686,7 +686,7 @@ sa1100_pcmcia_get_mem_map(unsigned int sock, struct pccard_mem_map *map)
DEBUG(2, "%s() for sock %u\n", __FUNCTION__, sock);
if (map->map < MAX_WIN) {
*map = skt->mem_map[map->map];
*map = skt->pc_mem_map[map->map];
ret = 0;
}
@@ -754,7 +754,7 @@ sa1100_pcmcia_set_mem_map(unsigned int sock, struct pccard_mem_map *map)
map->sys_stop += start;
map->sys_start = start;
skt->mem_map[map->map] = *map;
skt->pc_mem_map[map->map] = *map;
return 0;
} /* sa1100_pcmcia_set_mem_map() */
......
@@ -46,8 +46,6 @@ extern plat_pg_data_t *plat_node_data[];
#define PHYSADDR_TO_NID(pa) ALPHA_PA_TO_NID(pa)
#define PLAT_NODE_DATA(n) (plat_node_data[(n)])
#define PLAT_NODE_DATA_STARTNR(n) \
(PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
#define PLAT_NODE_DATA_SIZE(n) (PLAT_NODE_DATA(n)->gendata.node_size)
#if 1
......
@@ -11,7 +11,6 @@
#ifdef CONFIG_X86_NUMAQ
#include <asm/numaq.h>
#else
#define pa_to_nid(pa) (0)
#define pfn_to_nid(pfn) (0)
#ifdef CONFIG_NUMA
#define _cpu_to_node(cpu) 0
@@ -44,7 +43,6 @@ extern struct pglist_data *node_data[];
#define alloc_bootmem_low_pages_node(ignore, x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define node_startnr(nid) (node_data[nid]->node_start_mapnr)
#define node_size(nid) (node_data[nid]->node_size)
#define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn)
@@ -55,7 +53,7 @@ extern struct pglist_data *node_data[];
/*
* Given a kernel address, find the home node of the underlying memory.
*/
#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
/*
* Return a pointer to the node data for node n.
@@ -64,6 +62,8 @@ extern struct pglist_data *node_data[];
#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
NODE_DATA(nid)->node_size)
#define local_mapnr(kvaddr) \
( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
@@ -74,5 +74,13 @@ extern struct pglist_data *node_data[];
#define pfn_to_page(pfn) (node_mem_map(pfn_to_nid(pfn)) + node_localnr(pfn, pfn_to_nid(pfn)))
#define page_to_pfn(page) ((page - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
* pfn_valid should be made as fast as possible, and the current definition
* is valid for machines that are NUMA, but still contiguous, which is what
* is currently supported. A more generalised, but slower definition would
* be something like this - mbligh:
* ( pfn_to_pgdat(pfn) && (pfn < node_end_pfn(pfn_to_nid(pfn))) )
*/
#define pfn_valid(pfn) (pfn < num_physpages)
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_MMZONE_H_ */
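To make "use the lmem_maps instead" concrete, here is a minimal userspace model of the discontigmem pfn_to_page() defined above: each node carries its own map, and a pfn is resolved by finding its node and indexing that node's array with the local offset (node_localnr()). The two-node layout and the simplistic pfn_to_nid() are invented for the example.

#include <stdio.h>

struct page { unsigned long flags; };		/* stand-in for struct page */

struct pglist_data {				/* just the fields we need */
	struct page  *node_mem_map;		/* this node's lmem_map */
	unsigned long node_start_pfn;
	unsigned long node_size;
};

static struct page lmem_map0[4], lmem_map1[4];
static struct pglist_data node_data[2] = {
	{ lmem_map0, 0, 4 },			/* node 0: pfns 0..3 */
	{ lmem_map1, 4, 4 },			/* node 1: pfns 4..7 */
};

static int pfn_to_nid(unsigned long pfn)
{
	return pfn >= node_data[1].node_start_pfn;	/* toy two-node split */
}

static struct page *pfn_to_page(unsigned long pfn)
{
	struct pglist_data *pgdat = &node_data[pfn_to_nid(pfn)];
	return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
}

int main(void)
{
	unsigned long pfn = 5;
	int nid = pfn_to_nid(pfn);

	printf("pfn %lu -> node %d, slot %ld in that node's lmem_map\n",
	       pfn, nid, (long)(pfn_to_page(pfn) - node_data[nid].node_mem_map));
	return 0;
}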
@@ -32,17 +32,18 @@
/*
* for now assume that 64Gb is max amount of RAM for whole system
* 64Gb * 1024Mb/Gb = 65536 Mb
* 65536 Mb / 256Mb = 256
* 64Gb / 4096bytes/page = 16777216 pages
*/
#define MAX_NR_PAGES 16777216
#define MAX_ELEMENTS 256
#define ELEMENT_REPRESENTS 8 /* 256 Mb */
#define PAGES_PER_ELEMENT (16777216/256)
#define pfn_to_pgdat(pfn) NODE_DATA(pfn_to_nid(pfn))
#define PHYSADDR_TO_NID(pa) pfn_to_nid(pa >> PAGE_SHIFT)
#define MAX_NUMNODES 8
#ifdef CONFIG_NUMA
#define _cpu_to_node(cpu) (cpu_to_logical_apicid(cpu) >> 4)
#endif /* CONFIG_NUMA */
extern int pa_to_nid(u64);
extern int pfn_to_nid(unsigned long);
extern void get_memcfg_numaq(void);
#define get_memcfg_numa() get_memcfg_numaq()
......
@@ -145,10 +145,10 @@ static __inline__ int get_order(unsigned long size)
#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* !CONFIG_DISCONTIGMEM */
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
......
@@ -24,7 +24,6 @@ extern plat_pg_data_t *plat_node_data[];
#define PHYSADDR_TO_NID(pa) NASID_TO_COMPACT_NODEID(NASID_GET(pa))
#define PLAT_NODE_DATA(n) (plat_node_data[n])
#define PLAT_NODE_DATA_STARTNR(n) (PLAT_NODE_DATA(n)->gendata.node_start_mapnr)
#define PLAT_NODE_DATA_SIZE(n) (PLAT_NODE_DATA(n)->gendata.node_size)
#define PLAT_NODE_DATA_LOCALNR(p, n) \
(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
......
@@ -373,10 +373,10 @@ extern inline void pgd_clear(pgd_t *pgdp)
#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
#define mips64_pte_pagenr(x) \
(PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
#define pte_page(x) (mem_map+mips64_pte_pagenr(x))
#define pte_page(x) ( NODE_MEM_MAP(PHYSADDR_TO_NID(pte_val(x))) + \
PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))) )
#endif
/*
......
@@ -15,7 +15,10 @@
#include <linux/rbtree.h>
#include <linux/fs.h>
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif
extern unsigned long num_physpages;
extern void * high_memory;
extern int page_cluster;
@@ -345,8 +348,10 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_MINOR 1
#define VM_FAULT_MAJOR 2
/* The array of struct pages */
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
extern void show_free_areas(void);
......
@@ -112,7 +112,6 @@ struct zone {
struct page *zone_mem_map;
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
unsigned long zone_start_pfn;
unsigned long zone_start_mapnr;
/*
* rarely used fields:
@@ -163,7 +162,6 @@ typedef struct pglist_data {
unsigned long *valid_addr_bitmap;
struct bootmem_data *bdata;
unsigned long node_start_pfn;
unsigned long node_start_mapnr;
unsigned long node_size;
int node_id;
struct pglist_data *pgdat_next;
@@ -187,9 +185,10 @@ memclass(struct zone *pgzone, struct zone *classzone)
* prototypes for the discontig memory code.
*/
struct page;
void free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
unsigned long *zones_size, unsigned long paddr, unsigned long *zholes_size,
struct page *pmap);
extern void calculate_totalpages (pg_data_t *pgdat, unsigned long *zones_size,
unsigned long *zholes_size);
extern void free_area_init_core(pg_data_t *pgdat, unsigned long *zones_size,
unsigned long *zholes_size);
void get_zone_counts(unsigned long *active, unsigned long *inactive);
extern pg_data_t contig_page_data;
......
@@ -115,9 +115,11 @@ EXPORT_SYMBOL(vmalloc_32);
EXPORT_SYMBOL(vmap);
EXPORT_SYMBOL(vunmap);
EXPORT_SYMBOL(vmalloc_to_page);
EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(remap_page_range);
#ifndef CONFIG_DISCONTIGMEM
EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(max_mapnr);
#endif
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmtruncate);
EXPORT_SYMBOL(find_vma);
......
@@ -471,10 +471,12 @@ static int count_and_copy_data_pages(struct pbe *pagedir_p)
int nr_copy_pages = 0;
int pfn;
struct page *page;
#ifndef CONFIG_DISCONTIGMEM
if (max_mapnr != num_physpages)
panic("mapnr is not expected");
for (pfn = 0; pfn < max_mapnr; pfn++) {
#endif
for (pfn = 0; pfn < num_physpages; pfn++) {
page = pfn_to_page(pfn);
if (PageHighMem(page))
panic("Swsusp not supported on highmem boxes. Send 1GB of RAM to <pavel@ucw.cz> and try again ;-).");
@@ -514,19 +516,20 @@ static int count_and_copy_data_pages(struct pbe *pagedir_p)
static void free_suspend_pagedir(unsigned long this_pagedir)
{
struct page *page = mem_map;
int i;
struct page *page;
int pfn;
unsigned long this_pagedir_end = this_pagedir +
(PAGE_SIZE << pagedir_order);
for(i=0; i < num_physpages; i++, page++) {
for(pfn = 0; pfn < num_physpages; pfn++) {
page = pfn_to_page(pfn);
if (!TestClearPageNosave(page))
continue;
if (ADDRESS(i) >= this_pagedir && ADDRESS(i) < this_pagedir_end)
if (ADDRESS(pfn) >= this_pagedir && ADDRESS(pfn) < this_pagedir_end)
continue; /* old pagedir gets freed in one */
free_page(ADDRESS(i));
free_page(ADDRESS(pfn));
}
free_pages(this_pagedir, pagedir_order);
}
......
@@ -53,7 +53,12 @@
#include <linux/swapops.h>
#ifndef CONFIG_DISCONTIGMEM
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;
#endif
unsigned long num_physpages;
void * high_memory;
struct page *highmem_start_page;
@@ -72,8 +77,6 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
copy_user_highpage(to, from, address);
}
struct page *mem_map;
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
......
@@ -22,11 +22,21 @@ pg_data_t contig_page_data = { .bdata = &contig_bootmem_data };
* Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
*/
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
unsigned long *zones_size, unsigned long zone_start_pfn,
unsigned long *zones_size, unsigned long node_start_pfn,
unsigned long *zholes_size)
{
free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
zone_start_pfn, zholes_size, pmap);
unsigned long size;
contig_page_data.node_id = 0;
contig_page_data.node_start_pfn = node_start_pfn;
calculate_totalpages (&contig_page_data, zones_size, zholes_size);
if (pmap == (struct page *)0) {
size = (pgdat->node_size + 1) * sizeof(struct page);
pmap = (struct page *) alloc_bootmem_node(pgdat, size);
}
contig_page_data.node_mem_map = pmap;
free_area_init_core(&contig_page_data, zones_size, zholes_size);
mem_map = contig_page_data.node_mem_map;
}
#endif /* !CONFIG_DISCONTIGMEM */
@@ -48,22 +58,26 @@ struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order
* Nodes can be initialized in parallel, in no particular order.
*/
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
unsigned long *zones_size, unsigned long zone_start_pfn,
unsigned long *zones_size, unsigned long node_start_pfn,
unsigned long *zholes_size)
{
int i, size = 0;
struct page *discard;
if (mem_map == NULL)
mem_map = (struct page *)PAGE_OFFSET;
int i;
unsigned long size;
free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_pfn,
zholes_size, pmap);
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
calculate_totalpages (pgdat, zones_size, zholes_size);
if (pmap == (struct page *)0) {
size = (pgdat->node_size + 1) * sizeof(struct page);
pmap = (struct page *) alloc_bootmem_node(pgdat, size);
}
pgdat->node_mem_map = pmap;
free_area_init_core(pgdat, zones_size, zholes_size);
/*
* Get space for the valid bitmap.
*/
size = 0;
for (i = 0; i < MAX_NR_ZONES; i++)
size += zones_size[i];
size = LONG_ALIGN((size + 7) >> 3);
......
@@ -724,6 +724,23 @@ static inline void build_zonelists(pg_data_t *pgdat)
}
}
void __init calculate_totalpages (pg_data_t *pgdat, unsigned long *zones_size,
unsigned long *zholes_size)
{
unsigned long realtotalpages, totalpages = 0;
int i;
for (i = 0; i < MAX_NR_ZONES; i++)
totalpages += zones_size[i];
pgdat->node_size = totalpages;
realtotalpages = totalpages;
if (zholes_size)
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -= zholes_size[i];
printk("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
/*
* Helper functions to size the waitqueue hash table.
* Essentially these want to choose hash table sizes sufficiently
@@ -774,46 +791,18 @@ static inline unsigned long wait_table_bits(unsigned long size)
* - mark all memory queues empty
* - clear the memory bitmaps
*/
void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
unsigned long *zones_size, unsigned long zone_start_pfn,
unsigned long *zholes_size, struct page *lmem_map)
void __init free_area_init_core(pg_data_t *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long i, j;
unsigned long map_size;
unsigned long totalpages, offset, realtotalpages;
unsigned long local_offset;
const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
int nid = pgdat->node_id;
struct page *lmem_map = pgdat->node_mem_map;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
totalpages = 0;
for (i = 0; i < MAX_NR_ZONES; i++)
totalpages += zones_size[i];
realtotalpages = totalpages;
if (zholes_size)
for (i = 0; i < MAX_NR_ZONES; i++)
realtotalpages -= zholes_size[i];
printk("On node %d totalpages: %lu\n", nid, realtotalpages);
/*
* Some architectures (with lots of mem and discontinous memory
* maps) have to search for a good mem_map area:
* For discontigmem, the conceptual mem map array starts from
* PAGE_OFFSET, we need to align the actual array onto a mem map
* boundary, so that MAP_NR works.
*/
map_size = (totalpages + 1)*sizeof(struct page);
if (lmem_map == (struct page *)0) {
lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
lmem_map = (struct page *)(PAGE_OFFSET +
MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
}
*gmap = pgdat->node_mem_map = lmem_map;
pgdat->node_size = totalpages;
pgdat->node_start_pfn = zone_start_pfn;
pgdat->node_start_mapnr = (lmem_map - mem_map);
pgdat->nr_zones = 0;
offset = lmem_map - mem_map;
local_offset = 0; /* offset within lmem_map */
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long mask;
@@ -865,8 +854,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
zone->pages_low = mask*2;
zone->pages_high = mask*3;
zone->zone_mem_map = mem_map + offset;
zone->zone_start_mapnr = offset;
zone->zone_mem_map = lmem_map + local_offset;
zone->zone_start_pfn = zone_start_pfn;
if ((zone_start_pfn) & (zone_required_alignment-1))
@@ -878,7 +866,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
* done. Non-atomic initialization, single-pass.
*/
for (i = 0; i < size; i++) {
struct page *page = mem_map + offset + i;
struct page *page = lmem_map + local_offset + i;
set_page_zone(page, nid * MAX_NR_ZONES + j);
set_page_count(page, 0);
SetPageReserved(page);
@@ -892,7 +880,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
zone_start_pfn++;
}
offset += size;
local_offset += size;
for (i = 0; ; i++) {
unsigned long bitmap_size;
@@ -934,10 +922,13 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
build_zonelists(pgdat);
}
#ifndef CONFIG_DISCONTIGMEM
void __init free_area_init(unsigned long *zones_size)
{
free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, NULL);
mem_map = contig_page_data.node_mem_map;
}
#endif
static int __init setup_mem_frac(char *str)
{
......