// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		 2006	   Shaohua Li <shaohua.li@intel.com>
 *		 2013-2016 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#include "internal.h"

#define DRIVER_VERSION	"2.2"

static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

struct early_load_data early_data;

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}

static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";
	const char *cmdline = boot_command_line;
	const char *option = __dis_opt_str;

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as Xen PV guests don't see that CPUID bit set,
	 * but that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return true;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return true;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		dis_ucode_ldr = false;

	return dis_ucode_ldr;
}
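
/*
 * Usage sketch (follows directly from the check above): the loader can be
 * disabled entirely by adding the bare "dis_ucode_ldr" option to the kernel
 * command line, e.g. in the boot loader configuration:
 *
 *	linux ... dis_ucode_ldr
 *
 * With the option present, cmdline_find_option_bool() returns > 0, so
 * dis_ucode_ldr stays true and both the early and the late loading paths
 * bail out.
 */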

void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp(&early_data);
	else
		load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}

void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (dis_ucode_ldr)
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}

struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	size = boot_params.hdr.ramdisk_size;
	/* Early load on BSP has a temporary mapping. */
	if (size)
		start = initrd_start_early;

#else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;
		start += PAGE_OFFSET;
	}
#endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It may also
	 * have relocated the ramdisk. In either case, initrd_start contains
	 * the updated address, so use that instead.
	 */
	if (initrd_start)
		start = initrd_start;

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
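
/*
 * Usage sketch (distro tooling normally does this automatically): the vendor
 * drivers pass a path such as "kernel/x86/microcode/GenuineIntel.bin" or
 * "kernel/x86/microcode/AuthenticAMD.bin", so the blob is expected in an
 * uncompressed cpio prepended to the regular initrd, roughly:
 *
 *	mkdir -p initrd/kernel/x86/microcode
 *	cp ucode.bin initrd/kernel/x86/microcode/GenuineIntel.bin
 *	(cd initrd; find . | cpio -o -H newc) > ucode.cpio
 *	cat ucode.cpio /boot/initrd.img > /boot/initrd.img.new
 *
 * See Documentation/arch/x86/microcode.rst for the authoritative procedure.
 */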

static void reload_early_microcode(unsigned int cpu)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd(cpu);
		break;
	default:
		break;
	}
}

/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stop_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
enum sibling_ctrl {
	/* Spinwait with timeout */
	SCTRL_WAIT,
	/* Invoke the microcode_apply() callback */
	SCTRL_APPLY,
	/* Proceed without invoking the microcode_apply() callback */
	SCTRL_DONE,
};

struct microcode_ctrl {
	enum sibling_ctrl	ctrl;
	enum ucode_state	result;
	unsigned int		ctrl_cpu;
	bool			nmi_enabled;
};

DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
static atomic_t late_cpus_in, offline_in_nmi;
static unsigned int loops_per_usec;
static cpumask_t cpu_offline_mask;

static noinstr bool wait_for_cpus(atomic_t *cnt)
{
	unsigned int timeout, loops;

	WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (!raw_atomic_read(cnt))
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	/* Prevent the late comers from making progress and let them time out */
	raw_atomic_inc(cnt);
	return false;
}

static noinstr bool wait_for_ctrl(void)
{
	unsigned int timeout, loops;

	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
		if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
			return true;

		for (loops = 0; loops < loops_per_usec; loops++)
			cpu_relax();

		/* If invoked directly, tickle the NMI watchdog */
		if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
			instrumentation_begin();
			touch_nmi_watchdog();
			instrumentation_end();
		}
	}
	return false;
}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		return false;
	}

	/*
	 * Wait for primary threads to complete. If one of them hangs due
	 * to the update, there is no way out. This is non-recoverable
	 * because the CPU might hold locks or resources and confuse the
	 * scheduler, watchdogs etc. There is no way to safely evacuate the
	 * machine.
	 */
	if (wait_for_ctrl())
		return true;

	instrumentation_begin();
	panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
	instrumentation_end();
}
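
/*
 * Rendezvous overview (summary of the state machine below): every CPU
 * decrements late_cpus_in on entry via wait_for_cpus(). Secondaries then
 * spin in SCTRL_WAIT until their designated ctrl_cpu (the first sibling in
 * the core, see setup_cpus()) flips their state to either SCTRL_APPLY
 * (the update succeeded, run apply_microcode() on this thread too) or
 * SCTRL_DONE (just copy the primary's result and proceed).
 */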

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr void load_secondary(unsigned int cpu)
{
	unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
	enum ucode_state ret;

	if (!load_secondary_wait(ctrl_cpu)) {
		instrumentation_begin();
		pr_err_once("load: %d CPUs timed out\n",
			    atomic_read(&late_cpus_in) - 1);
		instrumentation_end();
		return;
	}

	/* Primary thread completed. Allow to invoke instrumentable code */
	instrumentation_begin();
	/*
	 * If the primary succeeded then invoke the apply() callback,
	 * otherwise copy the state from the primary thread.
	 */
	if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
		ret = microcode_ops->apply_microcode(cpu);
	else
		ret = per_cpu(ucode_ctrl.result, ctrl_cpu);

	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
	instrumentation_end();
}

static void __load_primary(unsigned int cpu)
{
	struct cpumask *secondaries = topology_sibling_cpumask(cpu);
	enum sibling_ctrl ctrl;
	enum ucode_state ret;
	unsigned int sibling;

	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
		return;
	}

	ret = microcode_ops->apply_microcode(cpu);
	this_cpu_write(ucode_ctrl.result, ret);
	this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);

	/*
	 * If the update was successful, let the siblings run the apply()
	 * callback. If not, tell them it's done. This also covers the
	 * case where the CPU has uniform loading at package or system
	 * scope implemented but does not advertise it.
	 */
	if (ret == UCODE_UPDATED || ret == UCODE_OK)
		ctrl = SCTRL_APPLY;
	else
		ctrl = SCTRL_DONE;

	for_each_cpu(sibling, secondaries) {
		if (sibling != cpu)
			per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
	}
}

static bool kick_offline_cpus(unsigned int nr_offl)
{
	unsigned int cpu, timeout;

	for_each_cpu(cpu, &cpu_offline_mask) {
		/* Enable the rendezvous handler and send NMI */
		per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
		apic_send_nmi_to_offline_cpu(cpu);
	}

	/* Wait for them to arrive */
	for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
		if (atomic_read(&offline_in_nmi) == nr_offl)
			return true;
		udelay(1);
	}
	/* Let the others time out */
	return false;
}

static void release_offline_cpus(void)
{
	unsigned int cpu;

	for_each_cpu(cpu, &cpu_offline_mask)
		per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
}

static void load_primary(unsigned int cpu)
{
	unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
	bool proceed = true;

	/* Kick soft-offlined SMT siblings if required */
	if (!cpu && nr_offl)
		proceed = kick_offline_cpus(nr_offl);

	/* If the soft-offlined CPUs did not respond, abort */
	if (proceed)
		__load_primary(cpu);

	/* Unconditionally release soft-offlined SMT siblings if required */
	if (!cpu && nr_offl)
		release_offline_cpus();
}

/*
 * Minimal stub rendezvous handler for soft-offlined CPUs which participate
 * in the NMI rendezvous to protect against a concurrent NMI on affected
 * CPUs.
 */
void noinstr microcode_offline_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return;
	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
	raw_atomic_inc(&offline_in_nmi);
	wait_for_ctrl();
}

static noinstr bool microcode_update_handler(void)
{
	unsigned int cpu = raw_smp_processor_id();

	if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
		instrumentation_begin();
		load_primary(cpu);
		instrumentation_end();
	} else {
		load_secondary(cpu);
	}

	instrumentation_begin();
	touch_nmi_watchdog();
	instrumentation_end();

	return true;
}

/*
 * Protection against instrumentation is required for CPUs which are not
 * safe against an NMI which is delivered to the secondary SMT sibling
 * while the primary thread updates the microcode. Instrumentation can end
 * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
 * which is the opposite of what the NMI rendezvous is trying to achieve.
 *
 * The primary thread is safe versus instrumentation as the actual
 * microcode update handles this correctly. It's only the sibling code
 * path which must be NMI safe until the primary thread completed the
 * update.
 */
bool noinstr microcode_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return false;

	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	return microcode_update_handler();
}

static int load_cpus_stopped(void *unused)
{
	if (microcode_ops->use_nmi) {
		/* Enable the NMI handler and raise NMI */
		this_cpu_write(ucode_ctrl.nmi_enabled, true);
		apic->send_IPI(smp_processor_id(), NMI_VECTOR);
	} else {
		/* Just invoke the handler directly */
		microcode_update_handler();
	}
	return 0;
}

static int load_late_stop_cpus(bool is_safe)
{
	unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
	unsigned int nr_offl, offline = 0;
	int old_rev = boot_cpu_data.microcode;
	struct cpuinfo_x86 prev_info;

	if (!is_safe) {
		pr_err("Late microcode loading without minimal revision check.\n");
		pr_err("You should switch to early loading, if possible.\n");
	}

	atomic_set(&late_cpus_in, num_online_cpus());
	atomic_set(&offline_in_nmi, 0);
	loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);

	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	if (microcode_ops->use_nmi)
		static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);

	stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);

	if (microcode_ops->use_nmi)
		static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);

	/* Analyze the results */
	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		switch (per_cpu(ucode_ctrl.result, cpu)) {
		case UCODE_UPDATED:	updated++;	break;
		case UCODE_TIMEOUT:	timedout++;	break;
		case UCODE_OK:		siblings++;	break;
		case UCODE_OFFLINE:	offline++;	break;
		default:		failed++;	break;
		}
	}

	if (microcode_ops->finalize_late_load)
		microcode_ops->finalize_late_load(!updated);

	if (!updated) {
		/* Nothing changed. */
		if (!failed && !timedout)
			return 0;

		nr_offl = cpumask_weight(&cpu_offline_mask);
		if (offline < nr_offl) {
			pr_warn("%u offline siblings did not respond.\n",
				nr_offl - atomic_read(&offline_in_nmi));
			return -EIO;
		}
		pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
		       failed, timedout);
		return -EIO;
	}

	if (!is_safe || failed || timedout)
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);

	pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
	if (failed || timedout) {
		pr_err("load incomplete. %u CPUs timed out or failed\n",
		       num_online_cpus() - (updated + siblings));
	}
	pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
	microcode_check(&prev_info);

	return updated + siblings == num_online_cpus() ? 0 : -EIO;
}

/*
 * This function does two things:
 *
 * 1) Ensure that all required CPUs which are present and have been booted
 *    once are online.
 *
 *    To pass this check, all primary threads must be online.
 *
 *    If the microcode load is not safe against NMI then all SMT threads
 *    must be online as well because they still react to NMIs when they are
 *    soft-offlined and parked in one of the play_dead() variants. So if an
 *    NMI hits while the primary thread updates the microcode the resulting
 *    behaviour is undefined. The default play_dead() implementation on
 *    modern CPUs uses MWAIT, which is also not guaranteed to be safe
 *    against a microcode update which affects MWAIT.
 *
 *    As soft-offlined CPUs still react on NMIs, the SMT sibling
 *    restriction can be lifted when the vendor driver signals to use NMI
 *    for rendezvous and the APIC provides a mechanism to send an NMI to a
 *    soft-offlined CPU. The soft-offlined CPUs are then able to
 *    participate in the rendezvous in a trivial stub handler.
 *
 * 2) Initialize the per CPU control structure and create a cpumask
 *    which contains "offline" secondary threads, so they can be handled
 *    correctly by a control CPU.
 */
static bool setup_cpus(void)
{
	struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
	bool allow_smt_offline;
	unsigned int cpu;

	allow_smt_offline = microcode_ops->nmi_safe ||
			    (microcode_ops->use_nmi && apic->nmi_to_offline_cpu);

	cpumask_clear(&cpu_offline_mask);

	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
		/*
		 * Offline CPUs sit in one of the play_dead() functions
		 * with interrupts disabled, but they still react on NMIs
		 * and execute arbitrary code. Also MWAIT being updated
		 * while the offline CPU sits there is not necessarily safe
		 * on all CPU variants.
		 *
		 * Mark them in the offline_cpus mask which will be handled
		 * by CPU0 later in the update process.
		 *
		 * Ensure that the primary thread is online so that it is
		 * guaranteed that all cores are updated.
		 */
		if (!cpu_online(cpu)) {
			if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
				pr_err("CPU %u not online, loading aborted\n", cpu);
				return false;
			}
			cpumask_set_cpu(cpu, &cpu_offline_mask);
			per_cpu(ucode_ctrl, cpu) = ctrl;
			continue;
		}

		/*
		 * Initialize the per CPU state. This is core scope for now,
		 * but prepared to take package or system scope into account.
		 */
		ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
		per_cpu(ucode_ctrl, cpu) = ctrl;
	}
	return true;
}

static int load_late_locked(void)
{
	if (!setup_cpus())
		return -EBUSY;

	switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
	case UCODE_NEW:
		return load_late_stop_cpus(false);
	case UCODE_NEW_SAFE:
		return load_late_stop_cpus(true);
	case UCODE_NFOUND:
		return -ENOENT;
	default:
		return -EBADFD;
	}
}

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val != 1)
		return -EINVAL;

	cpus_read_lock();
	ret = load_late_locked();
	cpus_read_unlock();

	return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
#endif
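
/*
 * Usage sketch (paths per Documentation/arch/x86/microcode.rst): with the
 * matching firmware blob available to request_firmware() (typically under
 * /lib/firmware/intel-ucode/ or /lib/firmware/amd-ucode/), a late load is
 * triggered from userspace through the "microcode" group registered on the
 * CPU subsystem root in microcode_init():
 *
 *	echo 1 > /sys/devices/system/cpu/microcode/reload
 *
 * The write path takes cpus_read_lock(), so it serializes against CPU
 * hotplug as described in the "Synchronization" comment above.
 */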

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};
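
/*
 * Usage sketch: mc_attr_group is instantiated per CPU device in
 * mc_cpu_online() below, so the currently applied revision can be read as
 *
 *	cat /sys/devices/system/cpu/cpu0/microcode/version
 *
 * The same revision is also reported in the "microcode" field of
 * /proc/cpuinfo via cpu_data(cpu).microcode.
 */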

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->mc)
		microcode_ops->apply_microcode(cpu);
	else
		reload_early_microcode(cpu);
}

static struct syscore_ops mc_syscore_ops = {
	.resume	= microcode_bsp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct device *dev = get_cpu_device(cpu);

	memset(uci, 0, sizeof(*uci));

	microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
	cpu_data(cpu).microcode = uci->cpu_sig.rev;
	if (!cpu)
		boot_cpu_data.microcode = uci->cpu_sig.rev;

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{
	struct device *dev_root;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));

	if (early_data.new_rev)
		pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);

	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group);
		put_device(dev_root);
		if (error) {
			pr_err("Error creating microcode group!\n");
			goto out_pdev;
		}
	}

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
			  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;
}
late_initcall(microcode_init);