/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

50 51
#ifdef CONFIG_HOTPLUG_CPU

52 53 54 55 56 57 58 59
static struct {
	struct task_struct *active_writer;
	struct mutex lock;	/*
				 * Synchronizes accesses to refcount. Also
				 * blocks the new readers during an ongoing
				 * cpu hotplug operation.
				 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
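
/*
 * Illustrative sketch (not part of the original file): the typical
 * reader-side pattern for the two helpers above. The function name and
 * the pr_info() message are hypothetical; get_online_cpus(),
 * for_each_online_cpu() and put_online_cpus() are the real APIs.
 */
#if 0
static void example_walk_online_cpus(void)
{
	unsigned int cpu;

	get_online_cpus();		/* block hotplug writers */
	for_each_online_cpu(cpu)
		pr_info("cpu%u is online\n", cpu);
	put_online_cpus();		/* allow hotplug again */
}
#endif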

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
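
/*
 * Illustrative sketch (not part of the original file): a minimal client of
 * register_cpu_notifier(). The callback name and messages are hypothetical;
 * the CPU_* actions, CPU_TASKS_FROZEN and NOTIFY_OK come from the real
 * <linux/cpu.h> and <linux/notifier.h>.
 */
#if 0
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		printk(KERN_INFO "example: cpu%u came online\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "example: cpu%u went away\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};
/* Registered once at init time: register_cpu_notifier(&example_cpu_notifier); */
#endif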

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
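
/*
 * Illustrative sketch (hedged; the real helper lives in <linux/cpumask.h>,
 * not in this file): cpumask_of(cpu) resolves to roughly
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * i.e. a constant, word-aligned slice of the table above whose only set
 * bit is bit 'cpu'.
 */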

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
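
/*
 * Illustrative sketch (not from this file): architecture boot code typically
 * seeds these masks before SMP bringup, along the lines of
 *
 *	init_cpu_possible(cpumask_of(0));	// start with the boot CPU only
 *	for_each_detected_cpu(i)		// hypothetical arch-side loop
 *		set_cpu_possible(i, true);
 *	set_cpu_present(0, true);
 *
 * set_cpu_online() is then flipped by the hotplug paths above as CPUs
 * come and go.
 */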