xref: /openbmc/linux/kernel/jump_label.c (revision 711aab1d)
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * OK to have it be a function here. The same goes for 'static_key_enable()'
 * and 'static_key_disable()', which require bug.h. This should allow
 * jump_label.h to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
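
/*
 * Illustrative sketch (not part of this file): the counted inc/dec API
 * is typically paired with a static branch on a hot path.  The names
 * 'net_tracing' and trace_packet() below are made up for the example;
 * DEFINE_STATIC_KEY_FALSE, static_branch_unlikely() and the slow
 * inc/dec calls are the documented static-key API.
 *
 *	static DEFINE_STATIC_KEY_FALSE(net_tracing);
 *
 *	void rx_packet(struct sk_buff *skb)
 *	{
 *		if (static_branch_unlikely(&net_tracing))
 *			trace_packet(skb);	// out-of-line until enabled
 *	}
 *
 *	// Each tracer takes a reference; the branch is patched to a
 *	// jump while the count is non-zero, and back to a NOP at zero.
 *	static_key_slow_inc(&net_tracing.key);
 *	...
 *	static_key_slow_dec(&net_tracing.key);
 */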

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
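
/*
 * Usage sketch (illustrative, not part of this file): unlike the
 * counted slow_inc/slow_dec above, enable/disable set the key to
 * exactly 1 or 0 and are idempotent.  'my_feature' is a made-up name.
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	static_branch_enable(&my_feature);	// key->enabled == 1
 *	static_branch_enable(&my_feature);	// no-op, still 1
 *	static_branch_disable(&my_feature);	// key->enabled == 0
 */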

static void static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
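
/*
 * Usage sketch for the deferred variant (illustrative, not part of this
 * file): callers with high enable/disable churn, e.g. perf events, take
 * the expensive text-patching hit on the dec side at most once per
 * timeout.  'my_events' is a made-up name.
 *
 *	static struct static_key_deferred my_events;
 *
 *	jump_label_rate_limit(&my_events, HZ / 4);	// batch decs, ~250ms
 *	static_key_slow_inc(&my_events.key);		// takes effect now
 *	static_key_slow_dec_deferred(&my_events);	// patching deferred
 *							// via workqueue
 */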

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
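
/*
 * Layout sketch of the encoding above, assuming the JUMP_TYPE_* values
 * from linux/jump_label.h (TRUE = 1UL, LINKED = 2UL, MASK = 3UL):
 *
 *	key->entries / key->next / key->type (one union)
 *
 *	+---------------------------- ... ----+--------+------+
 *	| pointer bits (entries or next list) | LINKED | TRUE |
 *	+---------------------------- ... ----+--------+------+
 *	                                        bit 1    bit 0
 */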

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
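
/*
 * Expanded, the XOR above gives (JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP = 1):
 *
 *	enabled	branch	result
 *	   0	   0	 NOP
 *	   0	   1	 JMP
 *	   1	   0	 JMP
 *	   1	   1	 NOP
 *
 * i.e. a site is patched to a jump whenever the key's enabled state
 * disagrees with the entry's branch bit; see the full table in
 * linux/jump_label.h.
 */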

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code.  See jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next share the same storage via a union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if any of the text addresses between @start and @end overlap
 * with the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the
 * jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
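
/*
 * Illustrative caller pattern (a sketch; real users include the kprobes
 * code): anything that patches kernel text checks for a collision with
 * jump label sites first.  'addr' and 'size' are made-up names.
 *
 *	if (jump_label_text_reserved(addr, addr + size))
 *		return -EBUSY;	// range owned by a jump label
 */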

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */