// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * encoding that nop. To get from one nop to the next, we add to the array
 * base an offset equal to the sum of the sizes of all preceding nops.
 *
 * Note: GENERIC_NOP5_ATOMIC is at the end, as it breaks the nice symmetry
 * of sizes of the previous nops.
 */
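/*
 * With this layout, for example, ideal_nops[5] points at the first byte of
 * the table's 5-byte nop (base + 1 + 2 + 3 + 4), and the extra slot at
 * index ASM_NOP_MAX + 1 (NOP_ATOMIC5) holds the 5-byte nop that can be
 * patched atomically; text_poke_loc_init() below relies on that slot when a
 * NOP5 is emulated as a JMP32 with zero displacement. Index 0 is unused
 * (NULL).
 */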
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
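/*
 * Pick the nop table that the boot CPU decodes most efficiently: K8-style
 * nops for the Intel Core-era models noted below, P6 nops for other
 * NOPL-capable Intel parts as well as AMD family 0x10+ and Hygon, and a
 * vendor-specific fallback otherwise.
 */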
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1- or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
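/*
 * Worked example for recompute_jump(): a replacement "e9 xx xx xx xx"
 * (JMP rel32) has its displacement re-evaluated relative to the original
 * instruction's address. If the new displacement fits in a signed byte,
 * the buffer is rewritten as "eb xx" (JMP rel8) and the remaining three
 * bytes are filled with a nop; otherwise the 5-byte JMP rel32 is kept
 * with the recomputed displacement.
 */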
/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		if (a->instrlen > a->replacementlen) {
			add_nops(insn_buff + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insn_buff_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}
}
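/*
 * For reference, the alt_instr entries walked above are emitted by the
 * ALTERNATIVE() macro family from <asm/alternative.h>, used roughly like
 * this (illustrative sketch; X86_FEATURE_FOO and the instruction strings
 * are placeholders):
 *
 *	asm volatile (ALTERNATIVE("old insn", "new insn", X86_FEATURE_FOO));
 *
 * The original instruction lands in .text, the replacement in
 * .altinstr_replacement, and the describing struct alt_instr in
 * .altinstructions, which is what __alt_instructions[] above points at.
 */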
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
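/*
 * Each entry in the __smp_locks section is a 32-bit offset, relative to its
 * own location, of a byte that is either a LOCK prefix (0xf0) or the DS
 * override (0x3e) standing in for it. When only one CPU can ever run, the
 * LOCK prefixes are downgraded to the harmless DS override to avoid the cost
 * of locked bus cycles; bringing up a second CPU upgrades them back.
 */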
struct smp_alt_module {
	/* what is this ??? */
	struct module *mod;
	char *name;

	/* ptrs to lock prefixes */
	const s32 *locks;
	const s32 *locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8 *text;
	u8 *text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod = mod;
	smp->name = name;
	smp->locks = locks;
	smp->locks_end = locks_end;
	smp->text = text;
	smp->text_end = text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call = int3_exception_notify,
		.priority = INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}
void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code that is about to be
	 * patched. Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also, no thread may be preempted in the middle of these instructions. And
 * on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Module text is initially marked as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}
typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids
 * TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core.
 *          To harden security, IRQs must be disabled while the temporary mm
 *          is loaded, thereby preventing interrupt handler bugs from
 *          overriding the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();
	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;
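/*
 * Note: poking_mm and poking_addr are set up once by poking_init() during
 * boot; __text_poke() below maps the target page(s) at poking_addr inside
 * that dedicated mm (see the "preallocated in poking_init()" assertion).
 */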
static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	local_irq_save(flags);

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but taking it avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not when the mm is not used, as is the case at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	pte_unmap_unlock(ptep, ptl);
	local_irq_restore(flags);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}
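/*
 * Illustrative use (not a real call site; addr/new_insn/new_insn_len are
 * placeholders): the caller is expected to hold text_mutex around the poke:
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, new_insn, new_insn_len);
 *	mutex_unlock(&text_mutex);
 */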
/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}

noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int len, ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	len = text_opcode_size(tp->opcode);
	ip += len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->rel32);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->rel32);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc(desc);
	return ret;
}

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;
/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:		vector of instructions to patch
 * @nr_entries:	number of entries in the vector
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using an int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of the
 *		  replacing opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	struct bp_patching_desc desc = {
		.vec = tp,
		.nr_entries = nr_entries,
		.refs = ATOMIC_INIT(1),
	};
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add an int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
		int len = text_opcode_size(tp[i].opcode);

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);
			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  (const char *)tp[i].text + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);
			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * executable code.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}
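	/*
	 * At this point every patched site still has INT3 as its first byte
	 * while the remaining bytes already carry the new instruction, so any
	 * CPU executing one of these sites traps into poke_int3_handler(),
	 * which emulates the new instruction (or leaves an explicitly poked
	 * INT3 for its owner to handle).
	 */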
	/*
	 * Third step: replace the first byte (int3) by the first byte of the
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove and synchronize_rcu(), except we have a very primitive
	 * refcount based completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}

static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;

	memcpy((void *)tp->text, opcode, len);
	if (!emulate)
		emulate = opcode;

	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
	insn_get_length(&insn);

	BUG_ON(!insn_complete(&insn));
	BUG_ON(len != insn.length);

	tp->rel_addr = addr - (void *)_stext;
	tp->opcode = insn.opcode.bytes[0];

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, ideal_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}

/*
 * We rely hard on tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}
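/*
 * Illustrative batching usage (addresses, instructions and lengths are
 * placeholders): queue several pokes and apply them with a single INT3
 * patching round, which is how callers such as the jump label batching code
 * are expected to use this interface:
 *
 *	text_poke_queue(addr1, insn1, len1, NULL);
 *	text_poke_queue(addr2, insn2, len2, NULL);
 *	text_poke_finish();
 */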
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Update a single instruction with the vector on the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}