// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code = jeb->code - delta;
	jea->target = jeb->target - delta;
	jea->key = jeb->key - delta;

	jeb->code = tmp.code + delta;
	jeb->target = tmp.target + delta;
	jeb->key = tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
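
/*
 * Illustrative sketch of how the reference-counted API implemented below is
 * typically consumed, via the static_branch_*() wrappers declared in
 * linux/jump_label.h. "my_feature_key", my_feature_register() and
 * my_feature_unregister() are hypothetical names used only for illustration;
 * they are not defined in this file.
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_feature_register(void)
 *	{
 *		static_branch_inc(&my_feature_key);	// -> static_key_slow_inc()
 *	}
 *
 *	void my_feature_unregister(void)
 *	{
 *		static_branch_dec(&my_feature_key);	// -> static_key_slow_dec()
 *	}
 *
 *	// hot path: compiles to a patched NOP/JMP, not a load-and-test
 *	if (static_branch_unlikely(&my_feature_key))
 *		do_my_feature();
 */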

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
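
/*
 * Illustrative sketch of the boolean enable/disable API above, as opposed to
 * the counted inc/dec API: static_key_enable() forces the count to exactly 1
 * and static_key_disable() forces it back to 0, which suits keys driven by a
 * single on/off control. "my_switch_key" and my_switch_set() are hypothetical
 * names, not defined in this file.
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_switch_key);
 *
 *	static void my_switch_set(bool on)
 *	{
 *		if (on)
 *			static_branch_enable(&my_switch_key);
 *		else
 *			static_branch_disable(&my_switch_key);
 *	}
 */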

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
			   unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
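
/*
 * Illustrative sketch of the rate-limited variant above: with
 * jump_label_rate_limit(), the final decrement is deferred so that rapid
 * enable/disable cycles do not hammer the text-patching machinery.
 * "my_deferred_key", my_init() and my_event_{start,stop}() are hypothetical;
 * static_key_slow_dec_deferred() is the wrapper from
 * linux/jump_label_ratelimit.h that ends up in
 * __static_key_slow_dec_deferred() above.
 *
 *	static struct static_key_deferred my_deferred_key;
 *
 *	static void my_init(void)
 *	{
 *		// defer the actual patch-out by at least one second
 *		jump_label_rate_limit(&my_deferred_key, HZ);
 *	}
 *
 *	static void my_event_start(void)
 *	{
 *		static_key_slow_inc(&my_deferred_key.key);
 *	}
 *
 *	static void my_event_stop(void)
 *	{
 *		static_key_slow_dec_deferred(&my_deferred_key);
 *	}
 */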

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
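
/*
 * The XOR above restates the table in linux/jump_label.h: a site must be a
 * JMP whenever the key's current value differs from the branch's compile-time
 * default (the 'branch' bit), and a NOP when they agree.
 *
 *	enabled	branch	instruction
 *	---------------------------
 *	true	true	NOP
 *	true	false	JMP
 *	false	true	JMP
 *	false	false	NOP
 */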

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (init || !jump_entry_is_init(entry)) {
			if (kernel_text_address(jump_entry_code(entry)))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)jump_entry_code(entry));
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}
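
/*
 * Rough picture of the linked representation walked above, for a key that is
 * used by the core kernel and by two modules (the module names are purely
 * illustrative):
 *
 *	key->next (JUMP_TYPE_LINKED set in key->type)
 *	   |
 *	   v
 *	static_key_mod { mod = modB, entries = modB's entries   } --next-->
 *	static_key_mod { mod = modA, entries = modA's entries   } --next-->
 *	static_key_mod { mod = NULL, entries = builtin entries  } --> NULL
 *
 * The tail element with mod == NULL (created by jump_label_add_module() below
 * when the key is first linked) stands for the built-in __jump_table entries;
 * ->entries may be NULL for a module that defines the key but never uses it.
 */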

/***
 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */