xref: /openbmc/linux/kernel/jump_label.c (revision a89988a6)
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

61  * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
62  * The use of 'atomic_read()' requires atomic.h and its problematic for some
63  * kernel headers such as kernel.h and others. Since static_key_count() is not
64  * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok
65  * to have it be a function here. Similarly, for 'static_key_enable()' and
66  * 'static_key_disable()', which require bug.h. This should allow jump_label.h
67  * to be included from most/all places for HAVE_JUMP_LABEL.
68  */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
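
/*
 * Illustrative summary (added commentary, not from the original file):
 * how values of key->enabled map onto static_key_count():
 *
 *	key->enabled	static_key_count()	meaning
 *	0		0			key disabled, no users
 *	n > 0		n			key enabled by n users
 *	-1		1			first static_key_slow_inc()
 *						still patching; reported as
 *						enabled with one user
 */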

void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (count)
		static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
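
/*
 * Usage sketch (added commentary; 'my_feature_key' is a made-up name):
 * static_key_enable()/static_key_disable() layer idempotent on/off
 * semantics over the reference-counted slow_inc/slow_dec, e.g. when
 * mirroring a boolean sysctl:
 *
 *	if (sysctl_my_feature)
 *		static_key_enable(&my_feature_key);
 *	else
 *		static_key_disable(&my_feature_key);
 *
 * The WARN_ON_ONCE() above catches mixing this with manual inc/dec,
 * which would push the count outside {0, 1}.
 */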

void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
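
/*
 * Worked example (added commentary, not from the original file) of the
 * negative-enabled protocol above, two CPUs racing on a key with
 * enabled == 0:
 *
 *	CPU0				CPU1
 *	static_key_slow_inc()
 *	  read v == 0, skip the
 *	  cmpxchg loop (v > 0 fails)
 *	  jump_label_lock()
 *	  atomic_set(&enabled, -1)	static_key_slow_inc()
 *	  jump_label_update(key)	  read v == -1, skip the loop
 *	  atomic_set(&enabled, 1)	  block on jump_label_lock()
 *	  jump_label_unlock()		  enabled != 0, atomic_inc()
 *					  -> enabled == 2
 *
 * CPU1 can never bump the count while the text patching is still in
 * flight, because -1 fails the 'v > 0' fast-path test.
 */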

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
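
/*
 * Usage sketch for the deferred API (added commentary; the key name is
 * made up for illustration). A subsystem that toggles a key at high
 * frequency batches the expensive text patching on the disable side:
 *
 *	static struct static_key_deferred my_events_key;
 *
 *	jump_label_rate_limit(&my_events_key, HZ);	(once, at init)
 *
 *	static_key_slow_inc(&my_events_key.key);	(hot path)
 *	static_key_slow_dec_deferred(&my_events_key);
 *
 * Rapid inc/dec pairs then patch the code at most once per 'rl' jiffies:
 * the dec re-increments the count and lets jump_label_update_timeout()
 * drop it later, instead of rewriting the text on every transition.
 */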

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
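
/*
 * Added commentary: addr_conflict() is a standard interval-overlap
 * test. The patched instruction occupies the half-open range
 * [entry->code, entry->code + JUMP_LABEL_NOP_SIZE) and the queried
 * range is [start, end], with @end inclusive, so the two intersect iff:
 *
 *	entry->code <= end  &&  entry->code + JUMP_LABEL_NOP_SIZE > start
 */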

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use accessor
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
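
/*
 * Pointer-packing illustration (added commentary). With
 * JUMP_TYPE_TRUE == 1UL, JUMP_TYPE_LINKED == 2UL and
 * JUMP_TYPE_MASK == 3UL, a default-true key whose entries live at,
 * say, 0xffffffff81e00000 is stored as:
 *
 *	key->type == 0xffffffff81e00000 | JUMP_TYPE_TRUE
 *		  == 0xffffffff81e00001
 *
 * static_key_entries() masks off the low two bits to recover the
 * table pointer; this is safe because 'struct jump_entry' tables are
 * word-aligned, so those bits of a real pointer are always zero.
 */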

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
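
/*
 * Truth table for the XOR above (added commentary):
 *
 *	enabled	branch	jump_label_type()
 *	false	false	JUMP_LABEL_NOP
 *	false	true	JUMP_LABEL_JMP
 *	true	false	JUMP_LABEL_JMP
 *	true	true	JUMP_LABEL_NOP
 *
 * i.e. the site stays a NOP when the key's state matches the branch's
 * compile-time direction, and becomes a jump when it does not.
 */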

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code. See jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
}
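
/*
 * For orientation (added commentary; simplified, arch details vary):
 * each static_branch_likely()/unlikely() site emits one record into
 * the __jump_table section from inline asm, along the lines of the
 * x86 implementation:
 *
 *	asm_volatile_goto("1: nop\n\t"
 *		".pushsection __jump_table, \"aw\"\n\t"
 *		".quad 1b, %l[l_yes], %c0 + %c1\n\t"
 *		".popsection\n\t"
 *		: : "i" (key), "i" (branch) : : l_yes);
 *
 * so iter->code above is the NOP address, iter->target the jump
 * destination, and the low bit of iter->key carries the branch
 * direction read back by jump_entry_branch().
 */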

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}
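
/*
 * Resulting layout (added commentary): after a vmlinux-defined key has
 * been picked up by two modules, the chain rooted at key->next looks
 * like:
 *
 *	key->next -> jlm(modB) -> jlm(modA) -> jlm2(mod == NULL)
 *
 * where jlm2 was created on the first module reference to carry the
 * key's original vmlinux entries (its 'mod' is NULL for core kernel),
 * and each later module is pushed at the head of the list.
 */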

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with
 * any of the jump label patch addresses. Code that wants to modify
 * kernel text should first verify that it does not overlap with any
 * of the jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
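
/*
 * Caller sketch (added commentary, names illustrative): text-patching
 * users such as kprobes refuse addresses that overlap a jump label
 * site, along the lines of:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr))
 *		ret = -EBUSY;
 *	jump_label_unlock();
 *
 * Passing addr twice checks the single byte at addr against every
 * patched instruction range.
 */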

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */