Commit 488514d1 authored by Christoph Lameter, committed by Linus Torvalds

Remove set_migrateflags()

Migrate flags must be set when a slab cache is created, as agreed upon when
the antifrag logic was reviewed.  Otherwise some slabs of a slab cache end up
in the unmovable section and others in the reclaimable section, depending on
which flag happened to be active when a new slab page was allocated.
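
As an illustration only (not part of this patch; the cache name and struct
below are made up), the agreed-upon pattern is to decide the migrate type
once, at cache creation time, via SLAB_RECLAIM_ACCOUNT:

    #include <linux/slab.h>

    struct example {
            int dummy;
    };

    static struct kmem_cache *example_cachep;

    static int __init example_init(void)
    {
            /* Every slab page backing this cache is grouped as reclaimable. */
            example_cachep = kmem_cache_create("example",
                            sizeof(struct example), 0,
                            SLAB_RECLAIM_ACCOUNT | SLAB_PANIC, NULL);
            return 0;
    }

ORing __GFP_RECLAIMABLE into individual kmem_cache_alloc() calls, as
set_migrateflags() did, only matters for the allocation that happens to pull
in a new slab page, so pages of the same cache can end up split between the
two sections.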

This likely slid in somehow when antifrag was merged. Remove it.

The buffer_heads are always allocated with __GFP_RECLAIMABLE because the
SLAB_RECLAIM_ACCOUNT option is set on bh_cachep.  The set_migrateflags() call
therefore never had any effect there.
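
For reference (a sketch, not taken verbatim from fs/buffer.c; the exact flag
combination and constructor there may differ), bh_cachep is created with
SLAB_RECLAIM_ACCOUNT at init time, along these lines:

    bh_cachep = kmem_cache_create("buffer_head",
                    sizeof(struct buffer_head), 0,
                    SLAB_RECLAIM_ACCOUNT | SLAB_PANIC,
                    NULL);

so every slab page of the cache already lands in the reclaimable section
regardless of the gfp flags passed to alloc_buffer_head().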

Radix tree nodes are not directly reclaimable, but they were allocated with
__GFP_RECLAIMABLE set on each allocation.  We now set SLAB_RECLAIM_ACCOUNT
when the radix tree slab cache is created, making sure that radix tree slabs
are consistently placed in the reclaimable section.  Radix tree slabs will
also be accounted as such.

There is then no user left of set_migrateflags(), so remove it.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e92adcba
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3180,8 +3180,7 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		get_cpu_var(bh_accounting).nr++;
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -144,12 +144,6 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 	return base + ZONE_NORMAL;
 }
 
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
-	BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
-	return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
-}
-
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it. The alloc_page*() variants return 'struct page *' and as such
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		}
 	}
 	if (ret == NULL)
-		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 
 	BUG_ON(radix_tree_is_indirect_ptr(ret));
 	return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
 	rtp = &__get_cpu_var(radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
-		node = kmem_cache_alloc(radix_tree_node_cachep,
-				set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
 {
 	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
 			sizeof(struct radix_tree_node), 0,
-			SLAB_PANIC, radix_tree_node_ctor);
+			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+			radix_tree_node_ctor);
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }