// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

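/*
 * With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, the code/target/key fields of a
 * jump_entry hold offsets relative to the field's own address rather than
 * absolute addresses, so a plain byte-wise swap would corrupt them. Re-bias
 * each field by 'delta', the distance between the two entries being swapped,
 * to keep the absolute addresses they encode unchanged.
 */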
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in branch statements as it is for the !CONFIG_JUMP_LABEL case, it is OK
 * to have it be a function here. The same goes for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
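
/*
 * Note: static_key_enabled() in linux/jump_label.h is built on top of this
 * function, as static_key_count(key) > 0; that is why the transient -1
 * state above must read as 1 (enabled) and never as 0.
 */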

/*
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done. Unlike refcount_t the ref counter
 * is not saturated, but will fail to increment on overflow.
 */
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Negative key->enabled has a special meaning: it sends
	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v <= 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

	return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);

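/*
 * key->enabled acts as a small state machine:
 *    0  - key disabled, no users;
 *   -1  - the first static_key_slow_inc() is mid-flight, patching text;
 *   n>0 - key enabled with n users.
 * The -1 state forces concurrent incs/decs into the slow path below, where
 * they serialize on jump_label_mutex.
 */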
bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 */
	if (static_key_fast_inc_not_disabled(key))
		return true;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
			jump_label_unlock();
			return false;
		}
	}
	jump_label_unlock();
	return true;
}

bool static_key_slow_inc(struct static_key *key)
{
	bool ret;

	cpus_read_lock();
	ret = static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
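
/*
 * API note: static_key_enable()/static_key_disable() force the key into an
 * absolute state (count 1 or 0) and suit keys toggled from a single control
 * path, whereas static_key_slow_inc()/static_key_slow_dec() keep a reference
 * count for multiple independent users. Mixing both styles on one key is
 * best avoided, as enable/disable expect a count of only 0 or 1 (note the
 * WARNs above).
 */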

static bool static_key_slow_try_dec(struct static_key *key)
{
	int v;

	/*
	 * Go into the slow path if key::enabled is less than or equal to
	 * one. One is a valid value for shutting down the key; anything less
	 * than one is an imbalance, which is handled at the call site.
	 *
	 * That includes the special case of '-1' which is set in
	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
	 * fully serialized in the slow path below. By the time this task
	 * acquires the jump label lock the value is back to one and the
	 * retry under the lock must succeed.
	 */
	v = atomic_read(&key->enabled);
	do {
		/*
		 * Warn about the '-1' case though, since that means a
		 * decrement is concurrent with a first (0->1) increment. IOW
		 * people are trying to disable something that wasn't yet fully
		 * enabled. This suggests an ordering problem on the user side.
		 */
		WARN_ON_ONCE(v < 0);
		if (v <= 1)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));

	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

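	/* The scoped guard releases jump_label_mutex on every return path. */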
	guard(mutex)(&jump_label_mutex);
	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
		jump_label_update(key);
	else
		WARN_ON_ONCE(!static_key_slow_try_dec(key));
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
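
/*
 * Usage sketch (hypothetical caller, not part of this file): rate limiting
 * helps when a key is flipped frequently, since each final decrement would
 * otherwise patch text immediately. With a deferred key, the decrement is
 * funnelled through the delayed work set up above:
 *
 *	static struct static_key_deferred deferred_key;
 *
 *	jump_label_rate_limit(&deferred_key, HZ);	// defer decs by ~1s
 *	static_key_slow_inc(&deferred_key.key);		// enable as usual
 *	static_key_slow_dec_deferred(&deferred_key);	// dec via workqueue
 */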

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type)
{
	/* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer track which pointer type is in use
 * (JUMP_TYPE_LINKED) and store the initial branch direction
 * (JUMP_TYPE_TRUE); the accessor functions below preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

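/*
 * The desired instruction is the XOR of the key's enabled state and the
 * entry's branch bit (see the table in linux/jump_label.h). With
 * JUMP_LABEL_NOP == 0 and JUMP_LABEL_JMP == 1:
 *
 *	enabled	branch	->	type
 *	   0	   0		NOP
 *	   0	   1		JMP
 *	   1	   0		JMP
 *	   1	   1		NOP
 */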
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with the
	 * 'raw' int values (to avoid pulling in atomic.h) in jump_label.h,
	 * let's make sure that is safe. There are only two cases to check
	 * since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

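/*
 * When a static key defined in one place (vmlinux or a module) is also used
 * by other modules, key->next points to a chain of these records, one per
 * module referencing the key. A NULL ->mod denotes the builtin kernel's own
 * entries (bounded by __stop___jump_table), as handled in
 * __jump_label_mod_update() below.
 */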
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */