Commit e175e9b2 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] numa: alloc_pages_node cleanup

Patch from Christoph Hellwig

It turns alloc_pages_node into a static inline, in the same fashion as
alloc_pages.  There is no need for #ifdef CONFIG_NUMA in the patch, as
the numa node identification functions are sensible enough to do the
right thing for non-NUMA systems.  Moves alloc_pages_node from numa.c
to gfp.h, and removes the EXPORT_SYMBOL which is no longer needed as
this is now an inline.
parent 15e28e8d
...@@ -38,8 +38,6 @@ ...@@ -38,8 +38,6 @@
* can allocate highmem pages, the *get*page*() variants return * can allocate highmem pages, the *get*page*() variants return
* virtual kernel addresses to the allocated page(s). * virtual kernel addresses to the allocated page(s).
*/ */
extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, struct zonelist *zonelist));
extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);
/* /*
* We get the zone list from the current node and the gfp_mask. * We get the zone list from the current node and the gfp_mask.
...@@ -48,14 +46,23 @@ extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned i ...@@ -48,14 +46,23 @@ extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned i
* For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
* optimized to &contig_page_data at compile-time. * optimized to &contig_page_data at compile-time.
*/ */
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order) extern struct page * FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
/*
 * alloc_pages_node - allocate 2^order contiguous pages from a given node.
 * @nid:      NUMA node id whose pg_data_t supplies the zonelist
 * @gfp_mask: allocation flags; the GFP_ZONEMASK bits index the zonelist
 * @order:    power-of-two page count
 *
 * Returns the first struct page of the allocation, or NULL on failure.
 * Works on non-NUMA kernels too: NODE_DATA(nid) collapses to
 * &contig_page_data there, so no #ifdef CONFIG_NUMA is needed.
 */
static inline struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	unsigned int idx = (gfp_mask & GFP_ZONEMASK);

	/* reject impossibly large requests before touching the allocator */
	if (unlikely(order >= MAX_ORDER))
		return NULL;
	return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + idx);
}
/*
 * alloc_pages - allocate 2^order contiguous pages from the current node.
 * @gfp_mask: allocation flags; the GFP_ZONEMASK bits index the zonelist
 * @order:    power-of-two page count
 *
 * Same as alloc_pages_node() but the node is numa_node_id(), i.e. the
 * node the caller is currently running on.  Returns NULL for orders at
 * or above MAX_ORDER.
 */
static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	struct pglist_data *pgdat = NODE_DATA(numa_node_id());
	unsigned int idx = (gfp_mask & GFP_ZONEMASK);

	/* reject impossibly large requests before touching the allocator */
	if (unlikely(order >= MAX_ORDER))
		return NULL;
	return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + idx);
}
......
...@@ -90,7 +90,6 @@ EXPORT_SYMBOL(exit_mm); ...@@ -90,7 +90,6 @@ EXPORT_SYMBOL(exit_mm);
/* internal kernel memory management */ /* internal kernel memory management */
EXPORT_SYMBOL(__alloc_pages); EXPORT_SYMBOL(__alloc_pages);
EXPORT_SYMBOL(alloc_pages_node);
EXPORT_SYMBOL(__get_free_pages); EXPORT_SYMBOL(__get_free_pages);
EXPORT_SYMBOL(get_zeroed_page); EXPORT_SYMBOL(get_zeroed_page);
EXPORT_SYMBOL(__page_cache_release); EXPORT_SYMBOL(__page_cache_release);
......
...@@ -40,18 +40,7 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap, ...@@ -40,18 +40,7 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
mem_map = contig_page_data.node_mem_map; mem_map = contig_page_data.node_mem_map;
} }
#endif /* !CONFIG_DISCONTIGMEM */ #else /* CONFIG_DISCONTIGMEM */
/*
 * Allocate 2^order contiguous pages from node nid.
 *
 * NOTE(review): this is the old out-of-line definition that the
 * surrounding patch deletes in favour of a static inline in gfp.h.
 */
struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
{
#ifdef CONFIG_NUMA
	/* select the requested node's zonelist via the gfp zone bits */
	return __alloc_pages(gfp_mask, order, NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
#else
	/* non-NUMA build: only one node, so ignore nid entirely */
	return alloc_pages(gfp_mask, order);
#endif
}
#ifdef CONFIG_DISCONTIGMEM
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment