/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

void static_key_slow_inc(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        if (atomic_inc_return(&key->enabled) == 1)
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
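/*
 * Usage sketch (illustrative only, not code used by this file; the key
 * and function names are hypothetical): a subsystem that flips a key
 * from a hot path typically pairs static_key_slow_inc() with the
 * deferred, rate-limited decrement above so that rapid enable/disable
 * cycles do not repeatedly invoke the text-patching machinery.
 *
 *      static struct static_key_deferred mykey_deferred;
 *
 *      static int __init mykey_init(void)
 *      {
 *              jump_label_rate_limit(&mykey_deferred, HZ);
 *              return 0;
 *      }
 *
 *      void mykey_user_add(void)
 *      {
 *              static_key_slow_inc(&mykey_deferred.key);
 *      }
 *
 *      void mykey_user_del(void)
 *      {
 *              static_key_slow_dec_deferred(&mykey_deferred);
 *      }
 */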
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}
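/*
 * Sketch of a possible override (hypothetical architecture, not code
 * used here): since the entry is known not to be executing, an arch
 * can write the instruction bytes directly instead of going through
 * its heavyweight live-patching path, assuming the text is still
 * writable at this point.  myarch_jump_insn() and myarch_ideal_nop are
 * placeholders for the arch's branch and nop encodings.
 *
 *      void __init_or_module
 *      arch_jump_label_transform_static(struct jump_entry *entry,
 *                                       enum jump_label_type type)
 *      {
 *              void *addr = (void *)entry->code;
 *
 *              if (type == JUMP_LABEL_JMP)
 *                      memcpy(addr, myarch_jump_insn(entry),
 *                             JUMP_LABEL_NOP_SIZE);
 *              else
 *                      memcpy(addr, myarch_ideal_nop,
 *                             JUMP_LABEL_NOP_SIZE);
 *      }
 */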
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
        return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
        return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections;
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve the type bit
                 * (JUMP_TYPE_MASK) the key was initialized with.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        static_key_initialized = true;
        jump_label_unlock();
}
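/*
 * Illustration of the pointer encoding used by the helpers and by
 * jump_label_init() above (hypothetical addresses, not code used here):
 * key->entries and entry->key both keep state in bit 0, which is free
 * because struct jump_entry and struct static_key are at least 4-byte
 * aligned.  A key built with STATIC_KEY_INIT_TRUE starts out as
 *
 *      key->entries == 0x1                          (JUMP_TYPE_TRUE)
 *
 * and once jump_label_init() has added the address of its first sorted
 * entry, say 0xffffffff81e40000, it holds 0xffffffff81e40001, so that
 *
 *      static_key_entries(key) -> 0xffffffff81e40000
 *      static_key_type(key)    -> true
 *
 * Similarly, bit 0 of entry->key records whether the site came from
 * static_branch_likely() (1) or static_branch_unlikely() (0);
 * jump_label_type() XORs that with the key's enabled state to decide
 * whether the site must currently be a NOP or a jump.
 */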
#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = key->next; mod; mod = mod->next) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries);
        }
}

/**
 * jump_label_apply_nops - patch module jump labels with the ideal nops
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * starts executing, patch its jump entries via
 * arch_jump_label_transform_static(), which is supplied by the arch
 * specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_static_branch(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module(iter->key, mod)) {
                        /*
                         * Set key->entries to iter, but preserve the type bit
                         * (JUMP_TYPE_MASK) the key was initialized with.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop);
        }

        return 0;
}
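/*
 * Illustration (hypothetical modules, not code used here): when a key
 * defined in built-in code is also used by modules, say foo.ko and then
 * bar.ko, jump_label_add_module() leaves key->entries pointing at the
 * built-in sites and chains one static_key_mod per module in load order:
 *
 *      key->next -> { mod = bar, entries = bar's first entry, next }
 *                     -> { mod = foo, entries = foo's first entry, NULL }
 *
 * jump_label_update() later walks this list via __jump_label_mod_update()
 * so that a static_key_slow_inc()/_dec() in the core kernel patches the
 * module sites as well; jump_label_del_module() below unlinks and frees
 * the node when a module goes away.
 */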
static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module(iter->key, mod))
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with
 * any of the jump label patch addresses. Code that wants to modify
 * kernel text should first verify that it does not overlap with any
 * of the jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
        struct module *mod;

        __jump_label_mod_update(key);

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */