xref: /openbmc/linux/kernel/jump_label.c (revision 3557b3fd)
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	return 0;
}

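/*
 * (Comment added for clarity.) With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE,
 * the code/target/key fields hold offsets relative to each field's own
 * address rather than absolute pointers, so sort() cannot move entries
 * with a plain byte-wise swap. When two entries trade places, every
 * relative field must be re-biased by 'delta', the distance the entry
 * moved:
 *
 *	new_field = old_field - (new_addr - old_addr)
 *
 * which is what the +/- delta arithmetic below implements.
 */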
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * ok to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
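
/*
 * (Note added for clarity.) static_key_enabled() in linux/jump_label.h
 * boils down to static_key_count(key) > 0, so the transient -1 state
 * above must read back as 1 rather than leak a negative count.
 */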

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
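
/*
 * Worked example (illustrative) of how key->enabled moves for a key that
 * starts disabled:
 *
 *	 0  --first slow_inc-->  -1   (patching in progress; readers of
 *	                               static_key_count() already see 1)
 *	-1  --patching done--->   1   (atomic_set_release() publishes the
 *	                               count only after the text changes)
 *	 1  --further incs---->   2, 3, ...  (fast cmpxchg path, no patching)
 */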

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
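
/*
 * Usage sketch (illustrative; 'my_key' and do_rare_thing() are
 * hypothetical, see linux/jump_label.h for the full API):
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	if (static_branch_unlikely(&my_key))
 *		do_rare_thing();		// NOP until the key is enabled
 *
 *	static_branch_enable(&my_key);		// boolean on/off, wraps the
 *	static_branch_disable(&my_key);		// static_key_enable()/_disable()
 *						// functions above
 *	static_branch_inc(&my_key);		// reference-counted alternative,
 *	static_branch_dec(&my_key);		// wraps static_key_slow_inc()/_dec()
 */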

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	lockdep_assert_cpus_held();

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
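
/*
 * Deferred-key usage sketch (illustrative; 'my_dkey' is hypothetical):
 *
 *	static struct static_key_deferred my_dkey;
 *
 *	jump_label_rate_limit(&my_dkey, HZ);	// coalesce disables
 *	static_key_slow_inc(&my_dkey.key);	// enable immediately
 *	static_key_slow_dec_deferred(&my_dkey);	// disable via delayed work
 *
 * Rapid enable/disable cycles then patch the text at most once per
 * timeout instead of on every transition.
 */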

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
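/*
 * Illustrative layout of the key->type word (assuming the generic
 * JUMP_TYPE_* encoding from linux/jump_label.h):
 *
 *	[ entries / static_key_mod pointer            | L | T ]
 *	                                                ^   ^
 *	    JUMP_TYPE_LINKED (pointer is a mod list) ---+   |
 *	    JUMP_TYPE_TRUE   (initial branch value) --------+
 */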
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
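
/*
 * Truth table, summarized from linux/jump_label.h (JUMP_LABEL_JMP is 1,
 * JUMP_LABEL_NOP is 0):
 *
 *	enabled  branch(likely)  site patched to
 *	   0          0          NOP (skip the out-of-line unlikely code)
 *	   0          1          JMP (jump over the inline likely code)
 *	   1          0          JMP (enter the out-of-line unlikely code)
 *	   1          1          NOP (fall through into the likely code)
 */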

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (init || !jump_entry_is_init(entry)) {
			if (kernel_text_address(jump_entry_code(entry)))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)jump_entry_code(entry));
		}
	}
}

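/*
 * (Comment added for clarity.) Sort the core kernel's __jump_table, write
 * the arch-optimal NOP into every site that is currently disabled, tag
 * entries that live in init text, and point each static_key at the first
 * entry of its now-contiguous block.
 */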
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}
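
/*
 * Illustrative result: after a built-in key has been adopted by modules
 * A and then B, the list hanging off key->next looks like:
 *
 *	key (JUMP_TYPE_LINKED set)
 *	  -> { mod = B,    entries = B's entries for this key }
 *	  -> { mod = A,    entries = A's entries for this key }
 *	  -> { mod = NULL, entries = the original built-in entries }
 */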

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
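
/*
 * Caller sketch (illustrative): a text patcher is expected to do,
 * with jump_label_lock() held:
 *
 *	if (jump_label_text_reserved(addr, addr + len))
 *		return -EBUSY;	// range owned by a jump label site
 */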

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */