/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
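
/*
 * Editorial note, illustrative only (hypothetical key name, not part of the
 * original file): the reference-counted API below is usually reached through
 * the wrappers in linux/jump_label.h. A typical user looks like:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	if (static_branch_unlikely(&my_feature_key))
 *		do_extra_work();		(fast path, patched NOP/JMP)
 *
 *	static_branch_inc(&my_feature_key);	(first inc patches the code)
 *	static_branch_dec(&my_feature_key);	(last dec patches it back)
 *
 * static_branch_inc()/static_branch_dec() are thin wrappers around
 * static_key_slow_inc()/static_key_slow_dec() on the embedded key; both may
 * sleep, since they take cpus_read_lock() and jump_label_mutex.
 */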
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
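
/*
 * Editorial note (illustrative, not part of the original file):
 * static_key_enable()/static_key_disable() force the count to exactly 1 or 0
 * and suit boolean-style keys with a single controller, while
 * static_key_slow_inc()/static_key_slow_dec() keep a reference count so that
 * several independent users can share one key:
 *
 *	static_key_slow_inc(&shared_key);	count 0 -> 1, text patched
 *	static_key_slow_inc(&shared_key);	count 1 -> 2, no patching
 *	static_key_slow_dec(&shared_key);	count 2 -> 1, no patching
 *	static_key_slow_dec(&shared_key);	count 1 -> 0, text patched back
 *
 * Only the 0 <-> 1 transitions reach jump_label_update().
 */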
static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					     unsigned long rate_limit,
					     struct delayed_work *work)
{
	lockdep_assert_cpus_held();

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
			   unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
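
/*
 * Editorial note, illustrative only (hypothetical key name): the deferred
 * variant batches the expensive disable path so that rapid enable/disable
 * cycles do not repeatedly patch kernel text:
 *
 *	static struct static_key_deferred my_deferred_key;
 *
 *	jump_label_rate_limit(&my_deferred_key, HZ / 4);
 *	...
 *	static_key_slow_inc(&my_deferred_key.key);
 *	...
 *	static_key_slow_dec_deferred(&my_deferred_key);
 *
 * When the count would drop to zero, __static_key_slow_dec_cpuslocked()
 * re-takes the reference and pushes the real decrement to the delayed work,
 * so a quick re-enable can cancel the patching altogether.
 */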
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
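
/*
 * Editorial note (illustrative): with JUMP_LABEL_NOP == 0 and
 * JUMP_LABEL_JMP == 1, jump_label_type() above is just the XOR:
 *
 *	enabled(key)	branch(entry)	resulting type
 *	     0		     0		JUMP_LABEL_NOP
 *	     0		     1		JUMP_LABEL_JMP
 *	     1		     0		JUMP_LABEL_JMP
 *	     1		     1		JUMP_LABEL_NOP
 *
 * i.e. a site is patched to a jump exactly when the key's current state
 * differs from the entry's branch bit; see linux/jump_label.h for how that
 * bit is assigned.
 */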
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (init || !jump_entry_is_init(entry)) {
			if (kernel_text_address(jump_entry_code(entry)))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)jump_entry_code(entry));
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}
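
/*
 * Editorial note (illustrative): static_key_set_entries() and
 * static_key_set_mod() both WARN if the pointer they store has either low
 * bit set; jump_entry tables and kzalloc()ed static_key_mod structures are
 * sufficiently aligned, which frees the two low bits of the pointer word:
 *
 *	bit 0 (JUMP_TYPE_TRUE)	 - initial branch direction of the key
 *	bit 1 (JUMP_TYPE_LINKED) - the word holds a static_key_mod list
 *				   rather than a jump_entry table
 *	remaining bits		 - the entries/next pointer itself
 */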
static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}
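
/*
 * Editorial summary (not part of the original file): the notifier below ties
 * the helpers above to the module lifecycle. On MODULE_STATE_COMING,
 * jump_label_add_module() sorts the module's jump entries, links them to each
 * key (through a static_key_mod list when the key lives elsewhere) and patches
 * any site whose desired state already differs from its compile-time default.
 * On MODULE_STATE_GOING, jump_label_del_module() unlinks the module's entries
 * and, once a single table remains, folds it back into the static_key.
 */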
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */