Lines Matching +full:key +full:-

1 // SPDX-License-Identifier: GPL-2.0-only
41 * Entries are sorted by key. in jump_label_cmp()
44 return -1; in jump_label_cmp()
55 return -1; in jump_label_cmp()
65 long delta = (unsigned long)a - (unsigned long)b; in jump_label_swap()
70 jea->code = jeb->code - delta; in jump_label_swap()
71 jea->target = jeb->target - delta; in jump_label_swap()
72 jea->key = jeb->key - delta; in jump_label_swap()
74 jeb->code = tmp.code + delta; in jump_label_swap()
75 jeb->target = tmp.target + delta; in jump_label_swap()
76 jeb->key = tmp.key + delta; in jump_label_swap()
88 size = (((unsigned long)stop - (unsigned long)start) in jump_label_sort_entries()
93 static void jump_label_update(struct static_key *key);
104 int static_key_count(struct static_key *key) in static_key_count() argument
107 * -1 means the first static_key_slow_inc() is in progress. in static_key_count()
110 int n = atomic_read(&key->enabled); in static_key_count()
117 * static_key_fast_inc_not_disabled - adds a user for a static key
118 * @key: static key that must be already enabled
120 * The caller must make sure that the static key can't get disabled while
122 * an already enabled static key.
127 bool static_key_fast_inc_not_disabled(struct static_key *key) in static_key_fast_inc_not_disabled() argument
131 STATIC_KEY_CHECK_USE(key); in static_key_fast_inc_not_disabled()
133 * Negative key->enabled has a special meaning: it sends in static_key_fast_inc_not_disabled()
134 * static_key_slow_inc/dec() down the slow path, and it is non-zero in static_key_fast_inc_not_disabled()
138 v = atomic_read(&key->enabled); in static_key_fast_inc_not_disabled()
142 } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1))); in static_key_fast_inc_not_disabled()
148 bool static_key_slow_inc_cpuslocked(struct static_key *key) in static_key_slow_inc_cpuslocked() argument
157 * static_key_enabled(&key) for jumps to be updated properly. in static_key_slow_inc_cpuslocked()
159 if (static_key_fast_inc_not_disabled(key)) in static_key_slow_inc_cpuslocked()
164 if (!atomic_cmpxchg(&key->enabled, 0, -1)) { in static_key_slow_inc_cpuslocked()
165 jump_label_update(key); in static_key_slow_inc_cpuslocked()
171 atomic_set_release(&key->enabled, 1); in static_key_slow_inc_cpuslocked()
177 if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) in static_key_slow_inc_cpuslocked()
183 bool static_key_slow_inc(struct static_key *key) in static_key_slow_inc() argument
188 ret = static_key_slow_inc_cpuslocked(key); in static_key_slow_inc()
194 void static_key_enable_cpuslocked(struct static_key *key) in static_key_enable_cpuslocked() argument
196 STATIC_KEY_CHECK_USE(key); in static_key_enable_cpuslocked()
199 if (atomic_read(&key->enabled) > 0) { in static_key_enable_cpuslocked()
200 WARN_ON_ONCE(atomic_read(&key->enabled) != 1); in static_key_enable_cpuslocked()
205 if (atomic_read(&key->enabled) == 0) { in static_key_enable_cpuslocked()
206 atomic_set(&key->enabled, -1); in static_key_enable_cpuslocked()
207 jump_label_update(key); in static_key_enable_cpuslocked()
211 atomic_set_release(&key->enabled, 1); in static_key_enable_cpuslocked()
217 void static_key_enable(struct static_key *key) in static_key_enable() argument
220 static_key_enable_cpuslocked(key); in static_key_enable()
225 void static_key_disable_cpuslocked(struct static_key *key) in static_key_disable_cpuslocked() argument
227 STATIC_KEY_CHECK_USE(key); in static_key_disable_cpuslocked()
230 if (atomic_read(&key->enabled) != 1) { in static_key_disable_cpuslocked()
231 WARN_ON_ONCE(atomic_read(&key->enabled) != 0); in static_key_disable_cpuslocked()
236 if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) in static_key_disable_cpuslocked()
237 jump_label_update(key); in static_key_disable_cpuslocked()
242 void static_key_disable(struct static_key *key) in static_key_disable() argument
245 static_key_disable_cpuslocked(key); in static_key_disable()
250 static bool static_key_dec_not_one(struct static_key *key) in static_key_dec_not_one() argument
255 * Go into the slow path if key::enabled is less than or equal to in static_key_dec_not_one()
256 * one. One is valid to shut down the key, anything less than one in static_key_dec_not_one()
259 * That includes the special case of '-1' which is set in in static_key_dec_not_one()
265 v = atomic_read(&key->enabled); in static_key_dec_not_one()
268 * Warn about the '-1' case though; since that means a in static_key_dec_not_one()
269 * decrement is concurrent with a first (0->1) increment. IOW in static_key_dec_not_one()
284 } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1))); in static_key_dec_not_one()
289 static void __static_key_slow_dec_cpuslocked(struct static_key *key) in __static_key_slow_dec_cpuslocked() argument
294 if (static_key_dec_not_one(key)) in __static_key_slow_dec_cpuslocked()
298 val = atomic_read(&key->enabled); in __static_key_slow_dec_cpuslocked()
300 * It should be impossible to observe -1 with jump_label_mutex held, in __static_key_slow_dec_cpuslocked()
303 if (WARN_ON_ONCE(val == -1)) in __static_key_slow_dec_cpuslocked()
311 if (atomic_dec_and_test(&key->enabled)) in __static_key_slow_dec_cpuslocked()
312 jump_label_update(key); in __static_key_slow_dec_cpuslocked()
315 static void __static_key_slow_dec(struct static_key *key) in __static_key_slow_dec() argument
318 __static_key_slow_dec_cpuslocked(key); in __static_key_slow_dec()
324 struct static_key_deferred *key = in jump_label_update_timeout() local
326 __static_key_slow_dec(&key->key); in jump_label_update_timeout()
330 void static_key_slow_dec(struct static_key *key) in static_key_slow_dec() argument
332 STATIC_KEY_CHECK_USE(key); in static_key_slow_dec()
333 __static_key_slow_dec(key); in static_key_slow_dec()
337 void static_key_slow_dec_cpuslocked(struct static_key *key) in static_key_slow_dec_cpuslocked() argument
339 STATIC_KEY_CHECK_USE(key); in static_key_slow_dec_cpuslocked()
340 __static_key_slow_dec_cpuslocked(key); in static_key_slow_dec_cpuslocked()
343 void __static_key_slow_dec_deferred(struct static_key *key, in __static_key_slow_dec_deferred() argument
347 STATIC_KEY_CHECK_USE(key); in __static_key_slow_dec_deferred()
349 if (static_key_dec_not_one(key)) in __static_key_slow_dec_deferred()
356 void __static_key_deferred_flush(void *key, struct delayed_work *work) in __static_key_deferred_flush() argument
358 STATIC_KEY_CHECK_USE(key); in __static_key_deferred_flush()
363 void jump_label_rate_limit(struct static_key_deferred *key, in jump_label_rate_limit() argument
366 STATIC_KEY_CHECK_USE(key); in jump_label_rate_limit()
367 key->timeout = rl; in jump_label_rate_limit()
368 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); in jump_label_rate_limit()
406 static inline struct jump_entry *static_key_entries(struct static_key *key) in static_key_entries() argument
408 WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED); in static_key_entries()
409 return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK); in static_key_entries()
412 static inline bool static_key_type(struct static_key *key) in static_key_type() argument
414 return key->type & JUMP_TYPE_TRUE; in static_key_type()
417 static inline bool static_key_linked(struct static_key *key) in static_key_linked() argument
419 return key->type & JUMP_TYPE_LINKED; in static_key_linked()
422 static inline void static_key_clear_linked(struct static_key *key) in static_key_clear_linked() argument
424 key->type &= ~JUMP_TYPE_LINKED; in static_key_clear_linked()
427 static inline void static_key_set_linked(struct static_key *key) in static_key_set_linked() argument
429 key->type |= JUMP_TYPE_LINKED; in static_key_set_linked()
441 static void static_key_set_entries(struct static_key *key, in static_key_set_entries() argument
447 type = key->type & JUMP_TYPE_MASK; in static_key_set_entries()
448 key->entries = entries; in static_key_set_entries()
449 key->type |= type; in static_key_set_entries()
454 struct static_key *key = jump_entry_key(entry); in jump_label_type() local
455 bool enabled = static_key_enabled(key); in jump_label_type()
472 * This skips patching built-in __exit, which in jump_label_can_update()
476 * Skipping built-in __exit is fine since it in jump_label_can_update()
489 static void __jump_label_update(struct static_key *key, in __jump_label_update() argument
494 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { in __jump_label_update()
500 static void __jump_label_update(struct static_key *key, in __jump_label_update() argument
505 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { in __jump_label_update()
526 struct static_key *key = NULL; in jump_label_init() local
557 if (iterk == key) in jump_label_init()
560 key = iterk; in jump_label_init()
561 static_key_set_entries(key, iter); in jump_label_init()
572 struct static_key *key = jump_entry_key(entry); in jump_label_init_type() local
573 bool type = static_key_type(key); in jump_label_init_type()
586 static inline struct static_key_mod *static_key_mod(struct static_key *key) in static_key_mod() argument
588 WARN_ON_ONCE(!static_key_linked(key)); in static_key_mod()
589 return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); in static_key_mod()
593 * key->type and key->next are the same via union.
594 * This sets key->next and preserves the type bits.
598 static void static_key_set_mod(struct static_key *key, in static_key_set_mod() argument
604 type = key->type & JUMP_TYPE_MASK; in static_key_set_mod()
605 key->next = mod; in static_key_set_mod()
606 key->type |= type; in static_key_set_mod()
624 ret = __jump_label_text_reserved(mod->jump_entries, in __jump_label_mod_text_reserved()
625 mod->jump_entries + mod->num_jump_entries, in __jump_label_mod_text_reserved()
626 start, end, mod->state == MODULE_STATE_COMING); in __jump_label_mod_text_reserved()
633 static void __jump_label_mod_update(struct static_key *key) in __jump_label_mod_update() argument
637 for (mod = static_key_mod(key); mod; mod = mod->next) { in __jump_label_mod_update()
645 if (!mod->entries) in __jump_label_mod_update()
648 m = mod->mod; in __jump_label_mod_update()
652 stop = m->jump_entries + m->num_jump_entries; in __jump_label_mod_update()
653 __jump_label_update(key, mod->entries, stop, in __jump_label_mod_update()
654 m && m->state == MODULE_STATE_COMING); in __jump_label_mod_update()
660 struct jump_entry *iter_start = mod->jump_entries; in jump_label_add_module()
661 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_add_module()
663 struct static_key *key = NULL; in jump_label_add_module() local
680 if (iterk == key) in jump_label_add_module()
683 key = iterk; in jump_label_add_module()
684 if (within_module((unsigned long)key, mod)) { in jump_label_add_module()
685 static_key_set_entries(key, iter); in jump_label_add_module()
690 return -ENOMEM; in jump_label_add_module()
691 if (!static_key_linked(key)) { in jump_label_add_module()
696 return -ENOMEM; in jump_label_add_module()
699 jlm2->mod = __module_address((unsigned long)key); in jump_label_add_module()
701 jlm2->entries = static_key_entries(key); in jump_label_add_module()
702 jlm2->next = NULL; in jump_label_add_module()
703 static_key_set_mod(key, jlm2); in jump_label_add_module()
704 static_key_set_linked(key); in jump_label_add_module()
706 jlm->mod = mod; in jump_label_add_module()
707 jlm->entries = iter; in jump_label_add_module()
708 jlm->next = static_key_mod(key); in jump_label_add_module()
709 static_key_set_mod(key, jlm); in jump_label_add_module()
710 static_key_set_linked(key); in jump_label_add_module()
714 __jump_label_update(key, iter, iter_stop, true); in jump_label_add_module()
722 struct jump_entry *iter_start = mod->jump_entries; in jump_label_del_module()
723 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; in jump_label_del_module()
725 struct static_key *key = NULL; in jump_label_del_module() local
729 if (jump_entry_key(iter) == key) in jump_label_del_module()
732 key = jump_entry_key(iter); in jump_label_del_module()
734 if (within_module((unsigned long)key, mod)) in jump_label_del_module()
738 if (WARN_ON(!static_key_linked(key))) in jump_label_del_module()
741 prev = &key->next; in jump_label_del_module()
742 jlm = static_key_mod(key); in jump_label_del_module()
744 while (jlm && jlm->mod != mod) { in jump_label_del_module()
745 prev = &jlm->next; in jump_label_del_module()
746 jlm = jlm->next; in jump_label_del_module()
753 if (prev == &key->next) in jump_label_del_module()
754 static_key_set_mod(key, jlm->next); in jump_label_del_module()
756 *prev = jlm->next; in jump_label_del_module()
760 jlm = static_key_mod(key); in jump_label_del_module()
762 if (jlm->next == NULL) { in jump_label_del_module()
763 static_key_set_entries(key, jlm->entries); in jump_label_del_module()
764 static_key_clear_linked(key); in jump_label_del_module()
813 * jump_label_text_reserved - check if addr range is reserved
840 static void jump_label_update(struct static_key *key) in jump_label_update() argument
848 if (static_key_linked(key)) { in jump_label_update()
849 __jump_label_mod_update(key); in jump_label_update()
854 mod = __module_address((unsigned long)key); in jump_label_update()
856 stop = mod->jump_entries + mod->num_jump_entries; in jump_label_update()
857 init = mod->state == MODULE_STATE_COMING; in jump_label_update()
861 entry = static_key_entries(key); in jump_label_update()
864 __jump_label_update(key, entry, stop, init); in jump_label_update()
876 WARN_ON(static_key_enabled(&sk_true.key) != true); in jump_label_test()
877 WARN_ON(static_key_enabled(&sk_false.key) != false); in jump_label_test()
887 WARN_ON(static_key_enabled(&sk_true.key) == true); in jump_label_test()
888 WARN_ON(static_key_enabled(&sk_false.key) == false); in jump_label_test()