// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

#define DA_ALL		(~0)
#define DA_ALT		0x01
#define DA_RET		0x02
#define DA_RETPOLINE	0x04
#define DA_ENDBR	0x08
#define DA_SMP		0x10

static unsigned int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	if (str && *str == '=')
		str++;

	if (!str || kstrtouint(str, 0, &debug_alternative))
		debug_alternative = DA_ALL;

	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(type, fmt, args...)					\
do {									\
	if (debug_alternative & DA_##type)				\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(type, buf, len, fmt, args...)			\
do {									\
	if (unlikely(debug_alternative & DA_##type)) {			\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

static const unsigned char x86nops[] =
{
	BYTES_NOP1,
	BYTES_NOP2,
	BYTES_NOP3,
	BYTES_NOP4,
	BYTES_NOP5,
	BYTES_NOP6,
	BYTES_NOP7,
	BYTES_NOP8,
#ifdef CONFIG_64BIT
	BYTES_NOP9,
	BYTES_NOP10,
	BYTES_NOP11,
#endif
};

const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
	NULL,
	x86nops,
	x86nops + 1,
	x86nops + 1 + 2,
	x86nops + 1 + 2 + 3,
	x86nops + 1 + 2 + 3 + 4,
	x86nops + 1 + 2 + 3 + 4 + 5,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
#ifdef CONFIG_64BIT
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9,
	x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10,
#endif
};

/*
 * Fill the buffer with a single effective instruction of size @len.
 *
 * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info)
 * for every single-byte NOP, try to generate the maximally available NOP of
 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for
 * each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
 * *jump* over instead of executing long and daft NOPs.
 */
static void __init_or_module add_nop(u8 *instr, unsigned int len)
{
	u8 *target = instr + len;

	if (!len)
		return;

	if (len <= ASM_NOP_MAX) {
		memcpy(instr, x86_nops[len], len);
		return;
	}

	if (len < 128) {
		__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
		instr += JMP8_INSN_SIZE;
	} else {
		__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
		instr += JMP32_INSN_SIZE;
	}

	for (;instr < target; instr++)
		*instr = INT3_INSN_OPCODE;
}

extern s32 __retpoline_sites[], __retpoline_sites_end[];
extern s32 __return_sites[], __return_sites_end[];
extern s32 __cfi_sites[], __cfi_sites_end[];
extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Matches NOP and NOPL, not any of the other possible NOPs.
 */
static bool insn_is_nop(struct insn *insn)
{
	/* Anything NOP, but no REP NOP */
	if (insn->opcode.bytes[0] == 0x90 &&
	    (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
		return true;

	/* NOPL */
	if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
		return true;

	/* TODO: more nops */

	return false;
}

/*
 * Find the offset of the first non-NOP instruction starting at @offset
 * but no further than @len.
 */
static int skip_nops(u8 *instr, int offset, int len)
{
	struct insn insn;

	for (; offset < len; offset += insn.length) {
		if (insn_decode_kernel(&insn, &instr[offset]))
			break;

		if (!insn_is_nop(&insn))
			break;
	}

	return offset;
}

/*
 * Optimize a sequence of NOPs, possibly preceded by an unconditional jump
 * to the end of the NOP sequence into a single NOP.
 */
static bool __init_or_module
__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
{
	int i = *next - insn->length;

	switch (insn->opcode.bytes[0]) {
	case JMP8_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
		*prev = i;
		*target = *next + insn->immediate.value;
		return false;
	}

	if (insn_is_nop(insn)) {
		int nop = i;

		*next = skip_nops(instr, *next, len);
		if (*target && *next == *target)
			nop = *prev;

		add_nop(instr + nop, *next - nop);
		DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
		return true;
	}

	*target = 0;
	return false;
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
{
	int prev, target = 0;

	for (int next, i = 0; i < len; i = next) {
		struct insn insn;

		if (insn_decode_kernel(&insn, &instr[i]))
			return;

		next = i + insn.length;

		__optimize_nops(instr, len, &insn, &next, &prev, &target);
	}
}

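/*
 * Worked example (illustration only, not part of the original source): a
 * 2-byte "jmp.d8 +3" followed by three single-byte NOPs that the jump skips
 * over,
 *
 *	eb 03 90 90 90
 *
 * is collapsed by optimize_nops() into a single 5-byte NOP, so only one
 * instruction (and one ORC/CFI entry) covers the whole range.
 */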

/*
 * In this context, "source" is where the instructions are placed in the
 * section .altinstr_replacement, for example during kernel build by the
 * toolchain.
 * "Destination" is where the instructions are being patched in by this
 * machinery.
 *
 * The source offset is:
 *
 *	src_imm = target - src_next_ip			(1)
 *
 * and the target offset is:
 *
 *	dst_imm = target - dst_next_ip			(2)
 *
 * so rework (1) as an expression for target like:
 *
 *	target = src_imm + src_next_ip			(1a)
 *
 * and substitute in (2) to get:
 *
 *	dst_imm = (src_imm + src_next_ip) - dst_next_ip	(3)
 *
 * Now, since the instruction stream is 'identical' at src and dst (it
 * is being copied after all) it can be stated that:
 *
 *	src_next_ip = src + ip_offset
 *	dst_next_ip = dst + ip_offset			(4)
 *
 * Substitute (4) in (3) and observe ip_offset being cancelled out to
 * obtain:
 *
 *	dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
 *		= src_imm + src - dst + ip_offset - ip_offset
 *		= src_imm + src - dst			(5)
 *
 * IOW, only the relative displacement of the code block matters.
 */

#define apply_reloc_n(n_, p_, d_)				\
	do {							\
		s32 v = *(s##n_ *)(p_);				\
		v += (d_);					\
		BUG_ON((v >> 31) != (v >> (n_-1)));		\
		*(s##n_ *)(p_) = (s##n_)v;			\
	} while (0)


static __always_inline
void apply_reloc(int n, void *ptr, uintptr_t diff)
{
	switch (n) {
	case 1: apply_reloc_n(8, ptr, diff); break;
	case 2: apply_reloc_n(16, ptr, diff); break;
	case 4: apply_reloc_n(32, ptr, diff); break;
	default: BUG();
	}
}

static __always_inline
bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
{
	u8 *target = src + offset;
	/*
	 * If the target is inside the patched block, it's relative to the
	 * block itself and does not need relocation.
	 */
	return (target < src || target > src + src_len);
}

static void __init_or_module noinline
apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
{
	int prev, target = 0;

	for (int next, i = 0; i < len; i = next) {
		struct insn insn;

		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
			return;

		next = i + insn.length;

		if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
			continue;

		switch (insn.opcode.bytes[0]) {
		case 0x0f:
			if (insn.opcode.bytes[1] < 0x80 ||
			    insn.opcode.bytes[1] > 0x8f)
				break;

			fallthrough;	/* Jcc.d32 */
		case 0x70 ... 0x7f:	/* Jcc.d8 */
		case JMP8_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
		case CALL_INSN_OPCODE:
			if (need_reloc(next + insn.immediate.value, src, src_len)) {
				apply_reloc(insn.immediate.nbytes,
					    buf + i + insn_offset_immediate(&insn),
					    src - dest);
			}

			/*
			 * Where possible, convert JMP.d32 into JMP.d8.
			 */
			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
				s32 imm = insn.immediate.value;
				imm += src - dest;
				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
				if ((imm >> 31) == (imm >> 7)) {
					buf[i+0] = JMP8_INSN_OPCODE;
					buf[i+1] = (s8)imm;

					memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2);
				}
			}
			break;
		}

		if (insn_rip_relative(&insn)) {
			if (need_reloc(next + insn.displacement.value, src, src_len)) {
				apply_reloc(insn.displacement.nbytes,
					    buf + i + insn_offset_displacement(&insn),
					    src - dest);
			}
		}
	}
}

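/*
 * Worked example of (5), as implemented by apply_relocation() above
 * (illustration only, not from the original source): a 5-byte CALL at
 * src = 0x1000 with src_imm = 0x100 targets 0x1000 + 5 + 0x100 = 0x1105.
 * Copied to dst = 0x3000, the immediate becomes
 *
 *	dst_imm = 0x100 + 0x1000 - 0x3000 = -0x1f00
 *
 * and dst + 5 + dst_imm = 0x3005 - 0x1f00 = 0x1105, i.e. the same target.
 */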

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK(ALT, "alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);

		/*
		 * Patch if either:
		 * - feature is present
		 * - feature not present but ALT_FLAG_NOT is set to mean,
		 *   patch if feature is *NOT* present.
		 */
		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
			optimize_nops(instr, a->instrlen);
			continue;
		}

		DPRINTK(ALT, "feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
			(a->flags & ALT_FLAG_NOT) ? "!" : "",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
			insn_buff[insn_buff_sz] = 0x90;

		apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);

		DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}
}

static inline bool is_jcc32(struct insn *insn)
{
	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}

#if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)

/*
 * CALL/JMP *%\reg
 */
static int emit_indirect(int op, int reg, u8 *bytes)
{
	int i = 0;
	u8 modrm;

	switch (op) {
	case CALL_INSN_OPCODE:
		modrm = 0x10; /* Reg = 2; CALL r/m */
		break;

	case JMP32_INSN_OPCODE:
		modrm = 0x20; /* Reg = 4; JMP r/m */
		break;

	default:
		WARN_ON_ONCE(1);
		return -1;
	}

	if (reg >= 8) {
		bytes[i++] = 0x41; /* REX.B prefix */
		reg -= 8;
	}

	modrm |= 0xc0; /* Mod = 3 */
	modrm += reg;

	bytes[i++] = 0xff; /* opcode */
	bytes[i++] = modrm;

	return i;
}

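/*
 * For illustration (not part of the original source): emit_indirect()
 * produces "ff e0" for "jmp *%rax" (reg 0) and "41 ff d3" for
 * "call *%r11" (reg 11, hence the REX.B prefix).
 */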

static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
	u8 op = insn->opcode.bytes[0];
	int i = 0;

	/*
	 * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
	 * tail-calls. Deal with them.
	 */
	if (is_jcc32(insn)) {
		bytes[i++] = op;
		op = insn->opcode.bytes[1];
		goto clang_jcc;
	}

	if (insn->length == 6)
		bytes[i++] = 0x2e; /* CS-prefix */

	switch (op) {
	case CALL_INSN_OPCODE:
		__text_gen_insn(bytes+i, op, addr+i,
				__x86_indirect_call_thunk_array[reg],
				CALL_INSN_SIZE);
		i += CALL_INSN_SIZE;
		break;

	case JMP32_INSN_OPCODE:
clang_jcc:
		__text_gen_insn(bytes+i, op, addr+i,
				__x86_indirect_jump_thunk_array[reg],
				JMP32_INSN_SIZE);
		i += JMP32_INSN_SIZE;
		break;

	default:
		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
		return -1;
	}

	WARN_ON_ONCE(i != insn->length);

	return i;
}

/*
 * Rewrite the compiler generated retpoline thunk calls.
 *
 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
 * indirect instructions, avoiding the extra indirection.
 *
 * For example, convert:
 *
 *   CALL __x86_indirect_thunk_\reg
 *
 * into:
 *
 *   CALL *%\reg
 *
 * It also tries to inline spectre_v2=retpoline,lfence when size permits.
 */
static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
{
	retpoline_thunk_t *target;
	int reg, ret, i = 0;
	u8 op, cc;

	target = addr + insn->length + insn->immediate.value;
	reg = target - __x86_indirect_thunk_array;

	if (WARN_ON_ONCE(reg & ~0xf))
		return -1;

	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
	BUG_ON(reg == 4);

	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
			return emit_call_track_retpoline(addr, insn, reg, bytes);

		return -1;
	}

	op = insn->opcode.bytes[0];

	/*
	 * Convert:
	 *
	 *   Jcc.d32 __x86_indirect_thunk_\reg
	 *
	 * into:
	 *
	 *   Jncc.d8 1f
	 *   [ LFENCE ]
	 *   JMP *%\reg
	 *   [ NOP ]
	 * 1:
	 */
	if (is_jcc32(insn)) {
		cc = insn->opcode.bytes[1] & 0xf;
		cc ^= 1; /* invert condition */

		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */

		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
		op = JMP32_INSN_OPCODE;
	}

	/*
	 * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE.
	 */
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
		bytes[i++] = 0x0f;
		bytes[i++] = 0xae;
		bytes[i++] = 0xe8; /* LFENCE */
	}

	ret = emit_indirect(op, reg, bytes + i);
	if (ret < 0)
		return ret;
	i += ret;

	/*
	 * The compiler is supposed to EMIT an INT3 after every unconditional
	 * JMP instruction due to AMD BTC. However, if the compiler is too old
	 * or SLS isn't enabled, we still need an INT3 after indirect JMPs
	 * even on Intel.
	 */
	if (op == JMP32_INSN_OPCODE && i < insn->length)
		bytes[i++] = INT3_INSN_OPCODE;

	for (; i < insn->length;)
		bytes[i++] = BYTES_NOP1;

	return i;
}

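/*
 * Illustrative example (assumption, not taken from the source): with
 * spectre_v2=off a 5-byte "call __x86_indirect_thunk_rax" is rewritten by
 * patch_retpoline() into "ff d0 90 90 90", i.e. "call *%rax" padded with
 * single-byte NOPs to the original length (which the subsequent
 * optimize_nops() pass may merge into one larger NOP).
 */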

/*
 * Generated by 'objtool --retpoline'.
 */
void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op1, op2;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op1 = insn.opcode.bytes[0];
		op2 = insn.opcode.bytes[1];

		switch (op1) {
		case CALL_INSN_OPCODE:
		case JMP32_INSN_OPCODE:
			break;

		case 0x0f: /* escape */
			if (op2 >= 0x80 && op2 <= 0x8f)
				break;
			fallthrough;
		default:
			WARN_ON_ONCE(1);
			continue;
		}

		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_retpoline(addr, &insn, bytes);
		if (len == insn.length) {
			optimize_nops(bytes, len);
			DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}

#ifdef CONFIG_RETHUNK

#ifdef CONFIG_CALL_THUNKS
void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
#endif

/*
 * Rewrite the compiler generated return thunk tail-calls.
 *
 * For example, convert:
 *
 *   JMP __x86_return_thunk
 *
 * into:
 *
 *   RET
 */
static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
	int i = 0;

	/* Patch the custom return thunks... */
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
		i = JMP32_INSN_SIZE;
		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
	} else {
		/* ... or patch them out if not needed. */
		bytes[i++] = RET_INSN_OPCODE;
	}

	for (; i < insn->length;)
		bytes[i++] = INT3_INSN_OPCODE;
	return i;
}

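/*
 * For illustration (not from the original source): when X86_FEATURE_RETHUNK
 * is not enabled, a 5-byte "jmp __x86_return_thunk" (e9 xx xx xx xx) is
 * rewritten by patch_return() into "c3 cc cc cc cc", a bare RET padded with
 * INT3 bytes.
 */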

void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
	s32 *s;

	/*
	 * Do not patch out the default return thunks if those needed are the
	 * ones generated by the compiler.
	 */
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
	    (x86_return_thunk == __x86_return_thunk))
		return;

	for (s = start; s < end; s++) {
		void *dest = NULL, *addr = (void *)s + *s;
		struct insn insn;
		int len, ret;
		u8 bytes[16];
		u8 op;

		ret = insn_decode_kernel(&insn, addr);
		if (WARN_ON_ONCE(ret < 0))
			continue;

		op = insn.opcode.bytes[0];
		if (op == JMP32_INSN_OPCODE)
			dest = addr + insn.length + insn.immediate.value;

		if (__static_call_fixup(addr, op, dest) ||
		    WARN_ONCE(dest != &__x86_return_thunk,
			      "missing return thunk: %pS-%pS: %*ph",
			      addr, dest, 5, addr))
			continue;

		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
			addr, addr, insn.length,
			addr + insn.length + insn.immediate.value);

		len = patch_return(addr, &insn, bytes);
		if (len == insn.length) {
			DUMP_BYTES(RET, ((u8*)addr), len, "%px: orig: ", addr);
			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
		}
	}
}
#else
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
#endif /* CONFIG_RETHUNK */

#else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */

void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }

#endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */

#ifdef CONFIG_X86_KERNEL_IBT

static void poison_cfi(void *addr);

static void __init_or_module poison_endbr(void *addr, bool warn)
{
	u32 endbr, poison = gen_endbr_poison();

	if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
		return;

	if (!is_endbr(endbr)) {
		WARN_ON_ONCE(warn);
		return;
	}

	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);

	/*
	 * When we have IBT, the lack of ENDBR will trigger #CP
	 */
	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
	text_poke_early(addr, &poison, 4);
}

/*
 * Generated by: objtool --ibt
 *
 * Seal the functions for indirect calls by clobbering the ENDBR instructions
 * and the kCFI hash value.
 */
void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;

		poison_endbr(addr, true);
		if (IS_ENABLED(CONFIG_FINEIBT))
			poison_cfi(addr - 16);
	}
}

#else

void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }

#endif /* CONFIG_X86_KERNEL_IBT */

#ifdef CONFIG_FINEIBT

enum cfi_mode {
	CFI_DEFAULT,
	CFI_OFF,
	CFI_KCFI,
	CFI_FINEIBT,
};

static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT;
static bool cfi_rand __ro_after_init = true;
static u32 cfi_seed __ro_after_init;

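/*
 * Usage note (illustration, not part of the original source): these are
 * driven by the "cfi=" boot parameter parsed below; e.g. booting with
 * "cfi=fineibt,norand" selects FineIBT and keeps the compile-time kCFI
 * hashes instead of re-seeding them.
 */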

/*
 * Re-hash the CFI hash with a boot-time seed while making sure the result is
 * not a valid ENDBR instruction.
 */
static u32 cfi_rehash(u32 hash)
{
	hash ^= cfi_seed;
	while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
		bool lsb = hash & 1;
		hash >>= 1;
		if (lsb)
			hash ^= 0x80200003;
	}
	return hash;
}

static __init int cfi_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "auto")) {
			cfi_mode = CFI_DEFAULT;
		} else if (!strcmp(str, "off")) {
			cfi_mode = CFI_OFF;
			cfi_rand = false;
		} else if (!strcmp(str, "kcfi")) {
			cfi_mode = CFI_KCFI;
		} else if (!strcmp(str, "fineibt")) {
			cfi_mode = CFI_FINEIBT;
		} else if (!strcmp(str, "norand")) {
			cfi_rand = false;
		} else {
			pr_err("Ignoring unknown cfi option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("cfi", cfi_parse_cmdline);

/*
 * kCFI						FineIBT
 *
 * __cfi_\func:					__cfi_\func:
 *	movl   $0x12345678,%eax	// 5		endbr64			// 4
 *	nop					subl   $0x12345678,%r10d // 7
 *	nop					jz     1f		// 2
 *	nop					ud2			// 2
 *	nop				1:	nop			// 1
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *	nop
 *
 *
 * caller:					caller:
 *	movl	$(-0x12345678),%r10d	// 6	movl   $0x12345678,%r10d // 6
 *	addl	$-15(%r11),%r10d	// 4	sub    $16,%r11		// 4
 *	je	1f			// 2	nop4			// 4
 *	ud2				// 2
 * 1:	call	__x86_indirect_thunk_r11 // 5	call   *%r11; nop2;	// 5
 *
 */

asm(	".pushsection .rodata			\n"
	"fineibt_preamble_start:		\n"
	"	endbr64				\n"
	"	subl	$0x12345678, %r10d	\n"
	"	je	fineibt_preamble_end	\n"
	"	ud2				\n"
	"	nop				\n"
	"fineibt_preamble_end:			\n"
	".popsection\n"
);

extern u8 fineibt_preamble_start[];
extern u8 fineibt_preamble_end[];

#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
#define fineibt_preamble_hash 7

asm(	".pushsection .rodata			\n"
	"fineibt_caller_start:			\n"
	"	movl	$0x12345678, %r10d	\n"
	"	sub	$16, %r11		\n"
	ASM_NOP4
	"fineibt_caller_end:			\n"
	".popsection				\n"
);

extern u8 fineibt_caller_start[];
extern u8 fineibt_caller_end[];

#define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
#define fineibt_caller_hash 2

#define fineibt_caller_jmp (fineibt_caller_size - 2)

static u32 decode_preamble_hash(void *addr)
{
	u8 *p = addr;

	/* b8 78 56 34 12		mov    $0x12345678,%eax */
	if (p[0] == 0xb8)
		return *(u32 *)(addr + 1);

	return 0; /* invalid hash value */
}

static u32 decode_caller_hash(void *addr)
{
	u8 *p = addr;

	/* 41 ba 78 56 34 12		mov    $0x12345678,%r10d */
	if (p[0] == 0x41 && p[1] == 0xba)
		return -*(u32 *)(addr + 2);

	/* e8 0c 78 56 34 12		jmp.d8 +12 */
	if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
		return -*(u32 *)(addr + 2);

	return 0; /* invalid hash value */
}

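/*
 * For illustration (not from the original source): a kCFI caller site as in
 * the diagram above stores the negated hash, e.g. "movl $(-0x12345678),%r10d"
 * (41 ba 88 a9 cb ed), and decode_caller_hash() negates the immediate again
 * to return the original hash 0x12345678.
 */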

/* .retpoline_sites */
static int cfi_disable_callers(s32 *start, s32 *end)
{
	/*
	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
	 * intact for later usage. Also see decode_caller_hash() and
	 * cfi_rewrite_callers().
	 */
	const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(addr, jmp, 2);
	}

	return 0;
}

static int cfi_enable_callers(s32 *start, s32 *end)
{
	/*
	 * Re-enable kCFI, undo what cfi_disable_callers() did.
	 */
	const u8 mov[] = { 0x41, 0xba };
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (!hash) /* nocfi callers */
			continue;

		text_poke_early(addr, mov, 2);
	}

	return 0;
}

/* .cfi_sites */
static int cfi_rand_preamble(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		hash = decode_preamble_hash(addr);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		hash = cfi_rehash(hash);
		text_poke_early(addr + 1, &hash, 4);
	}

	return 0;
}

static int cfi_rewrite_preamble(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		hash = decode_preamble_hash(addr);
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
			 addr, addr, 5, addr))
			return -EINVAL;

		text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
		WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
		text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
	}

	return 0;
}

static void cfi_rewrite_endbr(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;

		poison_endbr(addr+16, false);
	}
}

/* .retpoline_sites */
static int cfi_rand_callers(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (hash) {
			hash = -cfi_rehash(hash);
			text_poke_early(addr + 2, &hash, 4);
		}
	}

	return 0;
}

static int cfi_rewrite_callers(s32 *start, s32 *end)
{
	s32 *s;

	for (s = start; s < end; s++) {
		void *addr = (void *)s + *s;
		u32 hash;

		addr -= fineibt_caller_size;
		hash = decode_caller_hash(addr);
		if (hash) {
			text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
			WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
			text_poke_early(addr + fineibt_caller_hash, &hash, 4);
		}
		/* rely on apply_retpolines() */
	}

	return 0;
}

static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
			    s32 *start_cfi, s32 *end_cfi, bool builtin)
{
	int ret;

	if (WARN_ONCE(fineibt_preamble_size != 16,
		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
		return;

	if (cfi_mode == CFI_DEFAULT) {
		cfi_mode = CFI_KCFI;
		if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
			cfi_mode = CFI_FINEIBT;
	}

	/*
	 * Rewrite the callers to not use the __cfi_ stubs, such that we might
	 * rewrite them. This disables all CFI. If this succeeds but any of the
	 * later stages fails, we're without CFI.
	 */
	ret = cfi_disable_callers(start_retpoline, end_retpoline);
	if (ret)
		goto err;

	if (cfi_rand) {
		if (builtin)
			cfi_seed = get_random_u32();

		ret = cfi_rand_preamble(start_cfi, end_cfi);
		if (ret)
			goto err;

		ret = cfi_rand_callers(start_retpoline, end_retpoline);
		if (ret)
			goto err;
	}

	switch (cfi_mode) {
	case CFI_OFF:
		if (builtin)
			pr_info("Disabling CFI\n");
		return;

	case CFI_KCFI:
		ret = cfi_enable_callers(start_retpoline, end_retpoline);
		if (ret)
			goto err;

		if (builtin)
			pr_info("Using kCFI\n");
		return;

	case CFI_FINEIBT:
		/* place the FineIBT preamble at func()-16 */
		ret = cfi_rewrite_preamble(start_cfi, end_cfi);
		if (ret)
			goto err;

		/* rewrite the callers to target func()-16 */
		ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
		if (ret)
			goto err;

		/* now that nobody targets func()+0, remove ENDBR there */
		cfi_rewrite_endbr(start_cfi, end_cfi);

		if (builtin)
			pr_info("Using FineIBT CFI\n");
		return;

	default:
		break;
	}

err:
	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
}

static inline void poison_hash(void *addr)
{
	*(u32 *)addr = 0;
}

static void poison_cfi(void *addr)
{
	switch (cfi_mode) {
	case CFI_FINEIBT:
		/*
		 * __cfi_\func:
		 *	osp nopl (%rax)
		 *	subl	$0, %r10d
		 *	jz	1f
		 *	ud2
		 * 1:	nop
		 */
		poison_endbr(addr, false);
		poison_hash(addr + fineibt_preamble_hash);
		break;

	case CFI_KCFI:
		/*
		 * __cfi_\func:
		 *	movl	$0, %eax
		 *	.skip	11, 0x90
		 */
		poison_hash(addr + 1);
		break;

	default:
		break;
	}
}

#else

static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
			    s32 *start_cfi, s32 *end_cfi, bool builtin)
{
}

#ifdef CONFIG_X86_KERNEL_IBT
static void poison_cfi(void *addr) { }
#endif

#endif

void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
		   s32 *start_cfi, s32 *end_cfi)
{
	return __apply_fineibt(start_retpoline, end_retpoline,
			       start_cfi, end_cfi,
			       /* .builtin = */ false);
}

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

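/*
 * For illustration (not part of the original source): a "lock; incl (%rax)"
 * (f0 ff 00) whose LOCK prefix is recorded in .smp_locks becomes
 * "ds; incl (%rax)" (3e ff 00) on UP via alternatives_smp_unlock(), and is
 * flipped back by alternatives_smp_lock() once more CPUs come online.
 */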

struct smp_alt_module {
	/* what is this ??? */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, x86_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
	ANNOTATE_NOENDBR
"	movl	$1, (%" _ASM_ARG1 ")\n"
	ASM_RET
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern void int3_selftest_ip(void); /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long selftest = (unsigned long)&int3_selftest_ip;
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	OPTIMIZER_HIDE_VAR(selftest);

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != selftest)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

/* Must be noinline to ensure uniqueness of int3_selftest_ip. */
static noinline void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
	 * notifier above will emulate CALL for us.
	 */
	asm volatile ("int3_selftest_ip:\n\t"
		      ANNOTATE_NOENDBR
		      "    int3; nop; nop; nop; nop\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

static __initdata int __alt_reloc_selftest_addr;

__visible noinline void __init __alt_reloc_selftest(void *arg)
{
	WARN_ON(arg != &__alt_reloc_selftest_addr);
}

static noinline void __init alt_reloc_selftest(void)
{
	/*
	 * Tests apply_relocation().
	 *
	 * This has a relative immediate (CALL) in a place other than the first
	 * instruction and additionally on x86_64 we get a RIP-relative LEA:
	 *
	 *   lea 0x0(%rip),%rdi  # 5d0: R_X86_64_PC32  .init.data+0x5566c
	 *   call +0             # 5d5: R_X86_64_PLT32 __alt_reloc_selftest-0x4
	 *
	 * Getting this wrong will either crash and burn or tickle the WARN
	 * above.
	 */
	asm_inline volatile (
		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
		: /* output */
		: [mem] "m" (__alt_reloc_selftest_addr)
		: _ASM_ARG1
	);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code being patched.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	/*
	 * Paravirt patching and alternative patching can be combined to
	 * replace a function call with a short direct code sequence (e.g.
	 * by setting a constant return value instead of doing that in an
	 * external function).
	 * In order to make this work the following sequence is required:
	 * 1. set (artificial) features depending on used paravirt
	 *    functions which can later influence alternative patching
	 * 2. apply paravirt patching (generally replacing an indirect
	 *    function call with a direct one)
	 * 3. apply alternative patching (e.g. replacing a direct function
	 *    call with a custom code sequence)
	 * Doing paravirt patching after alternative patching would clobber
	 * the optimization of the custom code with a function call again.
	 */
	paravirt_set_cap();

	/*
	 * First patch paravirt functions, such that we overwrite the indirect
	 * call with the direct call.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
			__cfi_sites, __cfi_sites_end, true);

	/*
	 * Rewrite the retpolines, must be done before alternatives since
	 * those can rewrite the retpoline thunks.
	 */
	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
	apply_returns(__return_sites, __return_sites_end);

	/*
	 * Then patch alternatives, such that those paravirt calls that are in
	 * alternatives can be overwritten by their immediate fragments.
	 */
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/*
	 * Now all calls are established. Apply the call thunks if
	 * required.
	 */
	callthunks_patch_builtin_calls();

	/*
	 * Seal all functions that do not have their address taken.
	 */
	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	restart_nmi();
	alternatives_patched = 1;

	alt_reloc_selftest();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Modules text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not accessible
 * by other CPUs. Such mappings are needed to perform sensitive memory writes
 * that override the kernel memory protections (e.g., W^X), without exposing the
 * temporary page-table mappings that are required for these write operations to
 * other CPUs. Using a temporary mm also allows avoiding TLB shootdowns when the
 * mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core. To
 *          harden security, IRQs must be disabled while the temporary mm is
 *          loaded, thereby preventing interrupt handler bugs from overriding
 *          the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
		leave_mm(smp_processor_id());

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

static void text_poke_memset(void *dst, const void *src, size_t len)
{
	int c = *(const int *)src;

	memset(dst, c, len);
}

typedef void text_poke_f(void *dst, const void *src, size_t len);

static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but this allows us to avoid
	 * open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	func((u8 *)poking_addr + offset_in_page(addr), src, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is not used, as it is at this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	if (func == text_poke_memcpy) {
		/*
		 * If the text does not match what we just wrote then something is
		 * fundamentally screwy; there's nothing we can really do about that.
		 */
		BUG_ON(memcmp(addr, src, len));
	}

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(text_poke_memcpy, addr, opcode, len);
}

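/*
 * Example usage (illustration, not part of the original source):
 * alternatives_smp_lock() above flips a single prefix byte, with text_mutex
 * held by its caller, via
 *
 *	text_poke(ptr, ((unsigned char []){0xf0}), 1);
 *
 * Single-byte updates like this are atomic; larger updates to live code go
 * through text_poke_bp_batch() below instead.
 */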

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(text_poke_memcpy, addr, opcode, len);
}

void *text_poke_copy_locked(void *addr, const void *opcode, size_t len,
			    bool core_ok)
{
	unsigned long start = (unsigned long)addr;
	size_t patched = 0;

	if (WARN_ON_ONCE(!core_ok && core_kernel_text(start)))
		return NULL;

	while (patched < len) {
		unsigned long ptr = start + patched;
		size_t s;

		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);

		__text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s);
		patched += s;
	}
	return addr;
}

/**
 * text_poke_copy - Copy instructions into (an unused part of) RX memory
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy, could be more than 2x PAGE_SIZE
 *
 * Not safe against concurrent execution; useful for JITs to dump
 * new code blocks into unused regions of RX memory. Can be used in
 * conjunction with synchronize_rcu_tasks() to wait for existing
 * execution to quiesce after having made sure no existing functions
 * pointers are live.
 */
void *text_poke_copy(void *addr, const void *opcode, size_t len)
{
	mutex_lock(&text_mutex);
	addr = text_poke_copy_locked(addr, opcode, len, false);
	mutex_unlock(&text_mutex);
	return addr;
}

/**
 * text_poke_set - memset into (an unused part of) RX memory
 * @addr: address to modify
 * @c: the byte to fill the area with
 * @len: length to copy, could be more than 2x PAGE_SIZE
 *
 * This is useful to overwrite unused regions of RX memory with illegal
 * instructions.
 */
void *text_poke_set(void *addr, int c, size_t len)
{
	unsigned long start = (unsigned long)addr;
	size_t patched = 0;

	if (WARN_ON_ONCE(core_kernel_text(start)))
		return NULL;

	mutex_lock(&text_mutex);
	while (patched < len) {
		unsigned long ptr = start + patched;
		size_t s;

		s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched);

		__text_poke(text_poke_memset, (void *)ptr, (void *)&c, s);
		patched += s;
	}
	mutex_unlock(&text_mutex);
	return addr;
}

static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

/*
 * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
 * this thing. When len == 6 everything is prefixed with 0x0f and we map
 * opcode to Jcc.d8, using len to distinguish.
 */

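/*
 * Example of the scheme above (illustration only): a 6-byte "je.d32"
 * (0f 84 imm32) is recorded with len == 6, opcode 0x74 (its Jcc.d8 form) and
 * text[] holding the five bytes after the 0x0f prefix; text_poke_bp_batch()
 * and poke_int3_handler() below use len == 6 to know that the 0x0f must be
 * re-materialized and the Jcc emulated.
 */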
struct text_poke_loc {
	/* addr := _stext + rel_addr */
	s32 rel_addr;
	s32 disp;
	u8 len;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	/* see text_poke_bp_batch() */
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(void)
{
	struct bp_patching_desc *desc = &bp_desc;

	if (!raw_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(void)
{
	struct bp_patching_desc *desc = &bp_desc;

	smp_mb__before_atomic();
	raw_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}

noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc with non-zero refcount:
	 *
	 *	bp_desc.refs = 1		INT3
	 *	WMB				RMB
	 *	write INT3			if (bp_desc.refs != 0)
	 */
	smp_rmb();

	desc = try_get_desc();
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	ip += tp->len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->disp);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->disp);
		break;

	case 0x70 ... 0x7f: /* Jcc */
		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc();
	return ret;
}

#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
2157  *
2158  * The way it is done:
2159  *	- For each entry in the vector:
2160  *		- add an int3 trap to the address that will be patched
2161  *	- sync cores
2162  *	- For each entry in the vector:
2163  *		- update all but the first byte of the patched range
2164  *	- sync cores
2165  *	- For each entry in the vector:
2166  *		- replace the first byte (int3) by the first byte of
2167  *		  replacing opcode
2168  *	- sync cores
2169  */
2170 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
2171 {
2172 	unsigned char int3 = INT3_INSN_OPCODE;
2173 	unsigned int i;
2174 	int do_sync;
2175 
2176 	lockdep_assert_held(&text_mutex);
2177 
2178 	bp_desc.vec = tp;
2179 	bp_desc.nr_entries = nr_entries;
2180 
2181 	/*
2182 	 * Corresponds to the implicit memory barrier in try_get_desc() to
2183 	 * ensure reading a non-zero refcount provides up to date bp_desc data.
2184 	 */
2185 	atomic_set_release(&bp_desc.refs, 1);
2186 
2187 	/*
2188 	 * Function tracing can enable thousands of places that need to be
2189 	 * updated. This can take quite some time, and with full kernel debugging
2190 	 * enabled, this could cause the softlockup watchdog to trigger.
2191 	 * This function gets called every 256 entries added to be patched.
2192 	 * Call cond_resched() here to make sure that other tasks can get scheduled
2193 	 * while processing all the functions being patched.
2194 	 */
2195 	cond_resched();
2196 
2197 	/*
2198 	 * Corresponding read barrier in int3 notifier for making sure the
2199 	 * nr_entries and handler are correctly ordered wrt. patching.
2200 	 */
2201 	smp_wmb();
2202 
2203 	/*
2204 	 * First step: add an int3 trap to the address that will be patched.
2205 	 */
2206 	for (i = 0; i < nr_entries; i++) {
2207 		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
2208 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
2209 	}
2210 
2211 	text_poke_sync();
2212 
2213 	/*
2214 	 * Second step: update all but the first byte of the patched range.
2215 	 */
2216 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
2217 		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
2218 		u8 _new[POKE_MAX_OPCODE_SIZE+1];
2219 		const u8 *new = tp[i].text;
2220 		int len = tp[i].len;
2221 
2222 		if (len - INT3_INSN_SIZE > 0) {
2223 			memcpy(old + INT3_INSN_SIZE,
2224 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2225 			       len - INT3_INSN_SIZE);
2226 
2227 			if (len == 6) {
2228 				_new[0] = 0x0f;
2229 				memcpy(_new + 1, new, 5);
2230 				new = _new;
2231 			}
2232 
2233 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
2234 				  new + INT3_INSN_SIZE,
2235 				  len - INT3_INSN_SIZE);
2236 
2237 			do_sync++;
2238 		}
2239 
2240 		/*
2241 		 * Emit a perf event to record the text poke, primarily to
2242 		 * support Intel PT decoding which must walk the executable code
2243 		 * to reconstruct the trace. The flow up to here is:
2244 		 *   - write INT3 byte
2245 		 *   - IPI-SYNC
2246 		 *   - write instruction tail
2247 		 * At this point the actual control flow will be through the
2248 		 * INT3 and handler and not hit the old or new instruction.
2249 		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
2250 		 * can still be decoded. Subsequently:
2251 		 *   - emit RECORD_TEXT_POKE with the new instruction
2252 		 *   - IPI-SYNC
2253 		 *   - write first byte
2254 		 *   - IPI-SYNC
2255 		 * So before the text poke event timestamp, the decoder will see
2256 		 * either the old instruction flow or FUP/TIP of INT3. After the
2257 		 * text poke event timestamp, the decoder will see either the
2258 		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
2259 		 * use the timestamp as the point at which to modify the
2260 		 * executable code.
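		 *
		 * Roughly (illustrative timeline, time flowing left to right):
		 *
		 *   old insn or FUP/TIP(INT3) | RECORD_TEXT_POKE | new insn or FUP/TIP(INT3)
		 *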
2261 * The old instruction is recorded so that the event can be 2262 * processed forwards or backwards. 2263 */ 2264 perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len); 2265 } 2266 2267 if (do_sync) { 2268 /* 2269 * According to Intel, this core syncing is very likely 2270 * not necessary and we'd be safe even without it. But 2271 * better safe than sorry (plus there's not only Intel). 2272 */ 2273 text_poke_sync(); 2274 } 2275 2276 /* 2277 * Third step: replace the first byte (int3) by the first byte of 2278 * replacing opcode. 2279 */ 2280 for (do_sync = 0, i = 0; i < nr_entries; i++) { 2281 u8 byte = tp[i].text[0]; 2282 2283 if (tp[i].len == 6) 2284 byte = 0x0f; 2285 2286 if (byte == INT3_INSN_OPCODE) 2287 continue; 2288 2289 text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE); 2290 do_sync++; 2291 } 2292 2293 if (do_sync) 2294 text_poke_sync(); 2295 2296 /* 2297 * Remove and wait for refs to be zero. 2298 */ 2299 if (!atomic_dec_and_test(&bp_desc.refs)) 2300 atomic_cond_read_acquire(&bp_desc.refs, !VAL); 2301 } 2302 2303 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, 2304 const void *opcode, size_t len, const void *emulate) 2305 { 2306 struct insn insn; 2307 int ret, i = 0; 2308 2309 if (len == 6) 2310 i = 1; 2311 memcpy((void *)tp->text, opcode+i, len-i); 2312 if (!emulate) 2313 emulate = opcode; 2314 2315 ret = insn_decode_kernel(&insn, emulate); 2316 BUG_ON(ret < 0); 2317 2318 tp->rel_addr = addr - (void *)_stext; 2319 tp->len = len; 2320 tp->opcode = insn.opcode.bytes[0]; 2321 2322 if (is_jcc32(&insn)) { 2323 /* 2324 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish. 2325 */ 2326 tp->opcode = insn.opcode.bytes[1] - 0x10; 2327 } 2328 2329 switch (tp->opcode) { 2330 case RET_INSN_OPCODE: 2331 case JMP32_INSN_OPCODE: 2332 case JMP8_INSN_OPCODE: 2333 /* 2334 * Control flow instructions without implied execution of the 2335 * next instruction can be padded with INT3. 2336 */ 2337 for (i = insn.length; i < len; i++) 2338 BUG_ON(tp->text[i] != INT3_INSN_OPCODE); 2339 break; 2340 2341 default: 2342 BUG_ON(len != insn.length); 2343 } 2344 2345 switch (tp->opcode) { 2346 case INT3_INSN_OPCODE: 2347 case RET_INSN_OPCODE: 2348 break; 2349 2350 case CALL_INSN_OPCODE: 2351 case JMP32_INSN_OPCODE: 2352 case JMP8_INSN_OPCODE: 2353 case 0x70 ... 0x7f: /* Jcc */ 2354 tp->disp = insn.immediate.value; 2355 break; 2356 2357 default: /* assume NOP */ 2358 switch (len) { 2359 case 2: /* NOP2 -- emulate as JMP8+0 */ 2360 BUG_ON(memcmp(emulate, x86_nops[len], len)); 2361 tp->opcode = JMP8_INSN_OPCODE; 2362 tp->disp = 0; 2363 break; 2364 2365 case 5: /* NOP5 -- emulate as JMP32+0 */ 2366 BUG_ON(memcmp(emulate, x86_nops[len], len)); 2367 tp->opcode = JMP32_INSN_OPCODE; 2368 tp->disp = 0; 2369 break; 2370 2371 default: /* unknown instruction */ 2372 BUG(); 2373 } 2374 break; 2375 } 2376 } 2377 2378 /* 2379 * We hard rely on the tp_vec being ordered; ensure this is so by flushing 2380 * early if needed. 
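 *
 * E.g. (hypothetical addresses): queueing a poke for 0xffffffff81000200
 * followed by one for 0xffffffff81000100 would leave tp_vec unsorted and
 * break the binary search in poke_int3_handler(); tp_order_fail() catches
 * this and the pending entries are flushed before the new one is queued.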
2381 */ 2382 static bool tp_order_fail(void *addr) 2383 { 2384 struct text_poke_loc *tp; 2385 2386 if (!tp_vec_nr) 2387 return false; 2388 2389 if (!addr) /* force */ 2390 return true; 2391 2392 tp = &tp_vec[tp_vec_nr - 1]; 2393 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr) 2394 return true; 2395 2396 return false; 2397 } 2398 2399 static void text_poke_flush(void *addr) 2400 { 2401 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) { 2402 text_poke_bp_batch(tp_vec, tp_vec_nr); 2403 tp_vec_nr = 0; 2404 } 2405 } 2406 2407 void text_poke_finish(void) 2408 { 2409 text_poke_flush(NULL); 2410 } 2411 2412 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate) 2413 { 2414 struct text_poke_loc *tp; 2415 2416 text_poke_flush(addr); 2417 2418 tp = &tp_vec[tp_vec_nr++]; 2419 text_poke_loc_init(tp, addr, opcode, len, emulate); 2420 } 2421 2422 /** 2423 * text_poke_bp() -- update instructions on live kernel on SMP 2424 * @addr: address to patch 2425 * @opcode: opcode of new instruction 2426 * @len: length to copy 2427 * @emulate: instruction to be emulated 2428 * 2429 * Update a single instruction with the vector in the stack, avoiding 2430 * dynamically allocated memory. This function should be used when it is 2431 * not possible to allocate memory. 2432 */ 2433 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate) 2434 { 2435 struct text_poke_loc tp; 2436 2437 text_poke_loc_init(&tp, addr, opcode, len, emulate); 2438 text_poke_bp_batch(&tp, 1); 2439 } 2440
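
/*
 * Example usage (an illustrative sketch only; 'site', 'target' and the use
 * of a CALL are hypothetical and not taken from an in-tree call site):
 *
 *	// Rewrite a 5-byte site into "call target" on a live system, with
 *	// poke_int3_handler() emulating the CALL for any CPU that races
 *	// with the transition.
 *	u8 insn[CALL_INSN_SIZE];
 *
 *	__text_gen_insn(insn, CALL_INSN_OPCODE, site, target, CALL_INSN_SIZE);
 *	text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
 *
 * For many sites at once, queue them ideally in ascending address order
 * (out-of-order addresses force an early flush, see tp_order_fail() above)
 * and flush at the end:
 *
 *	text_poke_queue(site, insn, CALL_INSN_SIZE, NULL);	// per site
 *	text_poke_finish();
 *
 * Both paths must be called with text_mutex held; text_poke_bp_batch()
 * asserts this.
 */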