// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. The same goes for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
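
/*
 * Illustrative usage sketch, not part of this file: the counted slow path
 * above is what static_branch_inc()/static_branch_dec() resolve to for a
 * key defined with DEFINE_STATIC_KEY_FALSE(). The names my_feature_key,
 * my_feature_hook and do_expensive_tracking are made up for the example.
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_feature_hook(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			do_expensive_tracking();
 *	}
 *
 *	// Each user takes a reference: the branch is patched in on the
 *	// 0->1 transition and patched back to a NOP on the 1->0 one.
 *	static_branch_inc(&my_feature_key);
 *	...
 *	static_branch_dec(&my_feature_key);
 */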

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
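
/*
 * Illustrative usage sketch, not part of this file: static_key_enable() and
 * static_key_disable() force a key to exactly 1 or 0, whereas the
 * inc()/dec() pair above maintains a reference count; the two styles are
 * normally not mixed on the same key. The key name is made up.
 *
 *	DEFINE_STATIC_KEY_FALSE(my_switch);
 *
 *	static_branch_enable(&my_switch);	// key->enabled == 1
 *	static_branch_enable(&my_switch);	// still 1, not 2
 *	static_branch_disable(&my_switch);	// key->enabled == 0
 */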

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
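
/*
 * Illustrative usage sketch, not part of this file: a rate-limited key
 * defers the final disable so that rapid inc/dec cycles do not hammer the
 * text-patching machinery. The key name and HZ timeout are made up for the
 * example; a zero-initialized deferred key starts out false.
 *
 *	static struct static_key_deferred my_def_key;
 *
 *	// once, at init time: delay the disabling dec by at least HZ jiffies
 *	jump_label_rate_limit(&my_def_key, HZ);
 *
 *	static_key_slow_inc(&my_def_key.key);		// enable immediately
 *	static_key_slow_dec_deferred(&my_def_key);	// disable later, batched
 */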

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction, so we use access
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
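
/*
 * Illustrative sketch, not part of this file, assuming the JUMP_TYPE_*
 * values from linux/jump_label.h: jump_entry tables and static_key_mod
 * nodes are at least 4-byte aligned, so the two low bits of the shared
 * pointer are free and encode the key state roughly as:
 *
 *	bit 0 (JUMP_TYPE_TRUE)   - initial branch direction of the key
 *	bit 1 (JUMP_TYPE_LINKED) - key->next points at a static_key_mod list
 *				   rather than directly at a jump_entry table
 *
 *	entries = (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 */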

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
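
/*
 * Illustrative usage sketch, not part of this file: a hypothetical text
 * patcher would check the range before rewriting it, along the lines of
 * what kprobes does when it validates a probe address.
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + len))
 *		ret = -EBUSY;		// range belongs to a jump label site
 *	else
 *		ret = patch_range(addr, len);	// made-up patch helper
 *	jump_label_unlock();
 */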

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */