Commit 0edaf86c authored by Andrew Morton, committed by Linus Torvalds

include/linux/nodemask.h: create next_node_in() helper

Lots of code does

	node = next_node(node, XXX);
	if (node == MAX_NUMNODES)
		node = first_node(XXX);

so create next_node_in() to do this and use it in various places.
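
With the helper, each of those call sites collapses to a single line
(same placeholder mask XXX as above):

	node = next_node_in(node, XXX);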

[mhocko@suse.com: use next_node_in() helper]
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Hui Zhu <zhuhui@xiaomi.com>
Cc: Wang Xiaoqiang <wangxq10@lzu.edu.cn>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 48a27055
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -962,9 +962,7 @@ static void __init setup_numa_mapping(void)
 		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
 		cpu_2_node[best_cpu] = node;
 		cpumask_clear_cpu(best_cpu, &unbound_cpus);
-		node = next_node(node, default_nodes);
-		if (node == MAX_NUMNODES)
-			node = first_node(default_nodes);
+		node = next_node_in(node, default_nodes);
 	}
 
 	/* Print out node assignments and set defaults for disabled cpus */
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -617,9 +617,7 @@ static void __init numa_init_array(void)
 		if (early_cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
-		rr = next_node(rr, node_online_map);
-		if (rr == MAX_NUMNODES)
-			rr = first_node(node_online_map);
+		rr = next_node_in(rr, node_online_map);
 	}
 }
 
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -43,8 +43,10 @@
  *
  * int first_node(mask)			Number lowest set bit, or MAX_NUMNODES
  * int next_node(node, mask)		Next node past 'node', or MAX_NUMNODES
+ * int next_node_in(node, mask)		Next node past 'node', or wrap to first,
+ *					or MAX_NUMNODES
  * int first_unset_node(mask)		First node not set in mask, or
- *					MAX_NUMNODES.
+ *					MAX_NUMNODES
  *
  * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
  * NODE_MASK_ALL			Initializer - all bits set
@@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp)
 	return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
 }
 
+/*
+ * Find the next present node in src, starting after node n, wrapping around to
+ * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
+ */
+#define next_node_in(n, src) __next_node_in((n), &(src))
+int __next_node_in(int node, const nodemask_t *srcp);
+
 static inline void init_nodemask_of_node(nodemask_t *mask, int node)
 {
 	nodes_clear(*mask);
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2591,13 +2591,7 @@ int __cpuset_node_allowed(int node, gfp_t gfp_mask)
 
 static int cpuset_spread_node(int *rotor)
 {
-	int node;
-
-	node = next_node(*rotor, current->mems_allowed);
-	if (node == MAX_NUMNODES)
-		node = first_node(current->mems_allowed);
-	*rotor = node;
-	return node;
+	return *rotor = next_node_in(*rotor, current->mems_allowed);
 }
 
 int cpuset_mem_spread_node(void)
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,7 +25,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 sha1.o md5.o irq_regs.o argv_split.o \
	 flex_proportions.o ratelimit.o show_mem.o \
	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o nmi_backtrace.o
+	 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
 
 obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
--- /dev/null
+++ b/lib/nodemask.c
@@ -0,0 +1,30 @@
+#include <linux/nodemask.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+int __next_node_in(int node, const nodemask_t *srcp)
+{
+	int ret = __next_node(node, srcp);
+
+	if (ret == MAX_NUMNODES)
+		ret = __first_node(srcp);
+	return ret;
+}
+EXPORT_SYMBOL(__next_node_in);
+
+#ifdef CONFIG_NUMA
+/*
+ * Return the bit number of a random bit set in the nodemask.
+ * (returns NUMA_NO_NODE if nodemask is empty)
+ */
+int node_random(const nodemask_t *maskp)
+{
+	int w, bit = NUMA_NO_NODE;
+
+	w = nodes_weight(*maskp);
+	if (w)
+		bit = bitmap_ord_to_pos(maskp->bits,
+			get_random_int() % w, MAX_NUMNODES);
+	return bit;
+}
+#endif
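
For illustration only, not part of the commit: a minimal sketch of a
caller relying on the helper's wrap-around semantics.  The
round_robin_node() name is hypothetical, and the sketch ignores locking.

	/* Hypothetical example: hand out online nodes round-robin. */
	static int round_robin_node(void)
	{
		static int last_node = -1;	/* first scan starts at bit 0 */

		/* Wraps past the last online node back to the first;
		 * returns MAX_NUMNODES only if the mask is empty. */
		last_node = next_node_in(last_node, node_online_map);
		return last_node;
	}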
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -937,9 +937,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
  */
 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 {
-	nid = next_node(nid, *nodes_allowed);
-	if (nid == MAX_NUMNODES)
-		nid = first_node(*nodes_allowed);
+	nid = next_node_in(nid, *nodes_allowed);
 	VM_BUG_ON(nid >= MAX_NUMNODES);
 
 	return nid;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1389,9 +1389,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 	mem_cgroup_may_update_nodemask(memcg);
 	node = memcg->last_scanned_node;
 
-	node = next_node(node, memcg->scan_nodes);
-	if (node == MAX_NUMNODES)
-		node = first_node(memcg->scan_nodes);
+	node = next_node_in(node, memcg->scan_nodes);
 	/*
 	 * We call this when we hit limit, not when pages are added to LRU.
 	 * No LRU may hold pages because all pages are UNEVICTABLE or
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -97,7 +97,6 @@
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
-#include <linux/random.h>
 
 #include "internal.h"
 
@@ -347,9 +346,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 		BUG();
 
 	if (!node_isset(current->il_next, tmp)) {
-		current->il_next = next_node(current->il_next, tmp);
-		if (current->il_next >= MAX_NUMNODES)
-			current->il_next = first_node(tmp);
+		current->il_next = next_node_in(current->il_next, tmp);
 		if (current->il_next >= MAX_NUMNODES)
 			current->il_next = numa_node_id();
 	}
@@ -1709,9 +1706,7 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 	struct task_struct *me = current;
 
 	nid = me->il_next;
-	next = next_node(nid, policy->v.nodes);
-	if (next >= MAX_NUMNODES)
-		next = first_node(policy->v.nodes);
+	next = next_node_in(nid, policy->v.nodes);
 	if (next < MAX_NUMNODES)
 		me->il_next = next;
 	return nid;
@@ -1805,21 +1800,6 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
 		return interleave_nodes(pol);
 }
 
-/*
- * Return the bit number of a random bit set in the nodemask.
- * (returns NUMA_NO_NODE if nodemask is empty)
- */
-int node_random(const nodemask_t *maskp)
-{
-	int w, bit = NUMA_NO_NODE;
-
-	w = nodes_weight(*maskp);
-	if (w)
-		bit = bitmap_ord_to_pos(maskp->bits,
-			get_random_int() % w, MAX_NUMNODES);
-	return bit;
-}
-
 #ifdef CONFIG_HUGETLBFS
 /*
  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -288,13 +288,10 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 	 * accordance with memory policy of the user process if possible. For
 	 * now as a simple work-around, we use the next node for destination.
 	 */
-	if (PageHuge(page)) {
-		int node = next_online_node(page_to_nid(page));
-		if (node == MAX_NUMNODES)
-			node = first_online_node;
-		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					    node);
-	}
+	if (PageHuge(page))
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					    next_node_in(page_to_nid(page),
+							 node_online_map));
 
 	if (PageHighMem(page))
 		gfp_mask |= __GFP_HIGHMEM;
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -522,22 +522,15 @@ static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
-	int node;
-
-	node = next_node(cpu_to_mem(cpu), node_online_map);
-	if (node == MAX_NUMNODES)
-		node = first_node(node_online_map);
-
-	per_cpu(slab_reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
+						    node_online_map);
 }
 
 static void next_reap_node(void)
 {
 	int node = __this_cpu_read(slab_reap_node);
 
-	node = next_node(node, node_online_map);
-	if (unlikely(node >= MAX_NUMNODES))
-		node = first_node(node_online_map);
+	node = next_node_in(node, node_online_map);
 	__this_cpu_write(slab_reap_node, node);
 }
 