#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;
	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

/*
 * The displacement of a JMP in the replacement was encoded relative to its
 * location inside .altinstr_replacement. Recompute it relative to the
 * original instruction site, and shrink to a 2-byte JMP when the target is
 * within s8 range.
 */
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
	if (instr[0] != 0x90)
		return;

	add_nops(instr + (a->instrlen - a->padlen), a->padlen);

	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self-modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 */
void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %p -> %p", start, end);
	/*
	 * The scan order should be from start to end. Alternatives scanned
	 * later can overwrite code patched by earlier entries. Some kernel
	 * functions (e.g. memcpy, memset, etc) rely on this order to patch
	 * code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/* 0xe8 is a relative CALL; fix up its offset. */
		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
	mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	mutex_lock(&text_mutex);
	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
	mutex_unlock(&text_mutex);
}

struct smp_alt_module {
	/* the module that owns these lock entries; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static bool uniproc_patched = false;	/* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text, void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&smp_alt);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&smp_alt);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&smp_alt);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for SMP alternatives */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with NOPs */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code being patched.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also, machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other CPUs are not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also, no thread must be preempted in the middle of these instructions, and
 * on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery, but
	 * that causes hangs on some VIA CPUs.
	 */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/*
	 * Could also do a CLFLUSH here to speed up CPU recovery, but
	 * that causes hangs on some VIA CPUs.
	 */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}

static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/* bp_patching_in_progress; pairs with the smp_wmb() in text_poke_bp() */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}

/**
 * text_poke_bp() - update instructions on a live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of the
 *	  replacement opcode
 *	- sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;
	/*
	 * Corresponding read barrier in the int3 notifier makes sure the
	 * in_progress flag is correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	bp_patching_in_progress = false;
	smp_wmb();

	return addr;
}
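
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * one plausible way a caller could drive text_poke_bp() above to turn a
 * 5-byte NOP site into a 5-byte relative JMP. The names patch_jmp_site()
 * and JMP_INSN_SIZE are hypothetical and exist only to show the calling
 * convention. Passing "site + JMP_INSN_SIZE" as the int3 handler makes any
 * CPU that hits the transient breakpoint simply fall through, i.e. keep the
 * old NOP behaviour until patching completes. The caller must hold
 * text_mutex, as text_poke_bp() requires.
 *
 *	#define JMP_INSN_SIZE	5
 *
 *	static void patch_jmp_site(void *site, void *target)
 *	{
 *		unsigned char insn[JMP_INSN_SIZE];
 *		s32 disp = (s32)((long)target - ((long)site + JMP_INSN_SIZE));
 *
 *		insn[0] = 0xe9;				// JMP rel32
 *		memcpy(&insn[1], &disp, sizeof(disp));	// little-endian rel32
 *
 *		mutex_lock(&text_mutex);
 *		text_poke_bp(site, insn, JMP_INSN_SIZE,
 *			     (u8 *)site + JMP_INSN_SIZE);
 *		mutex_unlock(&text_mutex);
 *	}
 */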