// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

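/*
 * Usage sketch (illustrative only; 'my_key' and do_unlikely_work() are
 * made-up names, see the static-keys documentation for the real API
 * reference):
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_key);
 *
 *	if (static_branch_unlikely(&my_key))
 *		do_unlikely_work();
 *
 *	static_branch_enable(&my_key);
 *
 * Enabling/disabling a key does not toggle a variable tested at runtime;
 * it rewrites the branch instruction at every use site. This file
 * implements that patching machinery.
 */
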
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

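/*
 * With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, jump_entry fields hold
 * offsets relative to the entry's own address, so a plain byte-wise
 * swap would silently retarget both entries. Adjust every field by
 * the distance between the two slots to keep the encoded absolute
 * addresses unchanged.
 */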
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

/*
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done. Unlike refcount_t the ref counter
 * is not saturated, but will fail to increment on overflow.
 */
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Negative key->enabled has a special meaning: it sends
	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update(). Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v <= 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

	return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);

bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 */
	if (static_key_fast_inc_not_disabled(key))
		return true;

	guard(mutex)(&jump_label_mutex);
	/* Try to mark it as 'enabling in progress'. */
	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
		jump_label_update(key);
		/*
		 * Ensure that when static_key_fast_inc_not_disabled() or
		 * static_key_dec_not_one() observe the positive value,
		 * they must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		/*
		 * While holding the mutex this should never observe
		 * anything other than a value >= 1; the increment must
		 * therefore succeed.
		 */
		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
			return false;
	}
	return true;
}

bool static_key_slow_inc(struct static_key *key)
{
	bool ret;

	cpus_read_lock();
	ret = static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

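/*
 * Fast path for decrements: succeeds only while key->enabled > 1, i.e.
 * while the key stays enabled afterwards. Returning false sends the
 * caller to the serialized slow path, which handles the final 1 -> 0
 * transition. Underflow (v == 0) warns and claims success rather than
 * making the imbalance worse.
 */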
static bool static_key_dec_not_one(struct static_key *key)
{
	int v;

	/*
	 * Go into the slow path if key::enabled is less than or equal to
	 * one. One is valid to shut down the key, anything less than one
	 * is an imbalance, which is handled at the call site.
	 *
	 * That includes the special case of '-1' which is set in
	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
	 * fully serialized in the slow path below. By the time this task
	 * acquires the jump label lock the value is back to one and the
	 * retry under the lock must succeed.
	 */
	v = atomic_read(&key->enabled);
	do {
		/*
		 * Warn about the '-1' case though; since that means a
		 * decrement is concurrent with a first (0->1) increment. IOW
		 * people are trying to disable something that wasn't yet fully
		 * enabled. This suggests an ordering problem on the user side.
		 */
		WARN_ON_ONCE(v < 0);

		/*
		 * Warn about underflow, and lie about success in an attempt to
		 * not make things worse.
		 */
		if (WARN_ON_ONCE(v == 0))
			return true;

		if (v <= 1)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));

	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	int val;

	lockdep_assert_cpus_held();

	if (static_key_dec_not_one(key))
		return;

	guard(mutex)(&jump_label_mutex);
	val = atomic_read(&key->enabled);
	/*
	 * It should be impossible to observe -1 with jump_label_mutex held,
	 * see static_key_slow_inc_cpuslocked().
	 */
	if (WARN_ON_ONCE(val == -1))
		return;
	/*
	 * Cannot already be 0, something went sideways.
	 */
	if (WARN_ON_ONCE(val == 0))
		return;

	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

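/*
 * Work function for rate-limited keys: __static_key_slow_dec_deferred()
 * schedules this to carry out the postponed decrement once the timeout
 * configured via jump_label_rate_limit() has expired.
 */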
void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_dec_not_one(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
			   unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

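/*
 * Does the instruction patched by @entry overlap the byte range
 * [start, end]? Note that @end is treated as inclusive.
 */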
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type)
{
	/* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

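/*
 * Two flavours of the update loop. Without HAVE_JUMP_LABEL_BATCH every
 * entry is patched individually; with it, entries are queued and applied
 * in one go, letting the architecture amortize the cost of serializing
 * the text change (e.g. cross-CPU synchronization) over a whole batch.
 */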
#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

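/*
 * Early boot setup: sort the built-in __jump_table so that entries with
 * the same key are adjacent, let the architecture rewrite the initially
 * not-taken sites, tag entries that live in init text, and hang each
 * key's first entry off the key itself for later updates.
 */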
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

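/*
 * Once a static_key is used by modules other than the one defining it,
 * its entries no longer form a single contiguous table. The key then
 * points (flagged by JUMP_TYPE_LINKED) to a list of these records, one
 * per jump_entry table that references the key.
 */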
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

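/*
 * Module load hook: sort @mod's jump entries, attach them to the keys
 * they reference (switching a key to the linked static_key_mod form when
 * the key is defined outside @mod), and patch any site whose required
 * state already differs from the state it was compiled with.
 */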
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

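/*
 * Module unload hook (also used to roll back a failed load): unlink
 * @mod's static_key_mod records from every foreign key it used, and fold
 * a key back to a plain entries pointer once a single record remains.
 */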
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

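/*
 * Repatch every branch site belonging to @key. Linked keys walk their
 * per-module lists; otherwise a single table walk suffices, bounded by
 * the core kernel's __stop___jump_table or the owning module's table end.
 */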
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */