Commit 174596a0 authored by Rusty Russell

cpumask: convert mm/

Impact: Use new API

Convert kernel mm functions to use struct cpumask.

We skip include/linux/percpu.h and mm/allocpercpu.c, which are in flux.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
parent 41c7bb95
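
The conversion pattern used throughout: a fixed-size cpumask_t (always NR_CPUS bits, even on the stack) becomes either a cpumask_var_t, which is only a real allocation when CONFIG_CPUMASK_OFFSTACK=y, or a plain bitmap wrapped with to_cpumask(). A minimal sketch of the variable-mask idiom follows; example_touch_mask() is an illustrative name, not part of this patch.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Sketch only: allocate a mask, manipulate it via the cpumask_*()
 * accessors, free it.  With CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t
 * is a one-element array, so the alloc always succeeds and the free
 * is a no-op.
 */
static int example_touch_mask(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	cpumask_clear_cpu(0, mask);

	free_cpumask_var(mask);
	return 0;
}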
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
-	cpumask_t cpus_allowed;
+	cpumask_var_t cpus_allowed;
+
+	/*
+	 * Since the caller doesn't even check kthread_run() worked, let's not
+	 * freak out too much if this fails.
+	 */
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+		return 0;
+	}
 
 	/*
 	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpuset_cpus_allowed(current, &cpus_allowed);
-	set_cpus_allowed_ptr(current, &cpus_allowed);
+	cpuset_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, cpus_allowed);
+	free_cpumask_var(cpus_allowed);
 
 	return __pdflush(&my_work);
 }
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2157,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well.  Please see cpuup_callback
+	 * cpu_online_mask as well.  Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1970,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
@@ -2045,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
@@ -3451,7 +3451,7 @@ struct location {
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
 
@@ -3526,7 +3526,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 			if (track->pid > l->max_pid)
 				l->max_pid = track->pid;
 
-			cpu_set(track->cpu, l->cpus);
+			cpumask_set_cpu(track->cpu,
+					to_cpumask(l->cpus));
 		}
 		node_set(page_to_nid(virt_to_page(track)), l->nodes);
 		return 1;
@@ -3556,8 +3557,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	l->max_time = age;
 	l->min_pid = track->pid;
 	l->max_pid = track->pid;
-	cpus_clear(l->cpus);
-	cpu_set(track->cpu, l->cpus);
+	cpumask_clear(to_cpumask(l->cpus));
+	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 	nodes_clear(l->nodes);
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3638,11 +3639,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					&l->cpus);
+					to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
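
Where the mask is static, slub sidesteps cpumask_var_t: a DECLARE_BITMAP() costs the same NR_CPUS bits of BSS either way, and to_cpumask() converts it at each call site. A hedged sketch of that idiom; example_mask and mark_cpu_initialized are made-up names, not from the patch:

#include <linux/cpumask.h>

static DECLARE_BITMAP(example_mask, NR_CPUS);

/* Record one-time per-cpu initialization, as init_alloc_cpu_cpu() does. */
static void mark_cpu_initialized(int cpu)
{
	if (cpumask_test_cpu(cpu, to_cpumask(example_mask)))
		return;
	/* ... one-time setup for this cpu would go here ... */
	cpumask_set_cpu(cpu, to_cpumask(example_mask));
}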
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,7 +1902,7 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	if (!cpus_empty(*cpumask))
+	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,7 +20,7 @@
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
 	int cpu;
 	int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 void all_vm_events(unsigned long *ret)
 {
 	get_online_cpus();
-	sum_vm_events(ret, &cpu_online_map);
+	sum_vm_events(ret, cpu_online_mask);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
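
The vmstat change shows the calling convention the series standardizes on: helpers take a const struct cpumask * so they accept cpu_online_mask, or any cpumask_var_t, without copying NR_CPUS bits by value. A small sketch under that assumption; count_set_cpus() is a made-up example, not part of this commit:

#include <linux/cpumask.h>

/* Count the set bits by iterating only over cpus present in the mask. */
static int count_set_cpus(const struct cpumask *mask)
{
	int cpu, n = 0;

	for_each_cpu(cpu, mask)
		n++;
	return n;
}

/* Typical call: count_set_cpus(cpu_online_mask); */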