/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *		 2006	   Shaohua Li <shaohua.li@intel.com>
 *		 2013-2015 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating microcode on x86 processors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>

#define MICROCODE_VERSION	"2.01"

static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr;

/*
 * Synchronization.
 *
 * All non-cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single CPU is being
 * updated at any given time.
 */
static DEFINE_MUTEX(microcode_mutex);
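
/*
 * Illustrative sketch only (the function below is hypothetical and not part
 * of this driver): the locking pattern described above, as followed by
 * microcode_write(), reload_store() and microcode_init() further down.
 *
 *	static void example_update_section(void)
 *	{
 *		get_online_cpus();		// pin the set of online CPUs
 *		mutex_lock(&microcode_mutex);	// serialize against other updaters
 *
 *		// ... request and apply microcode, one CPU at a time ...
 *
 *		mutex_unlock(&microcode_mutex);
 *		put_online_cpus();
 *	}
 */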

struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

/*
 * Operations that are run on a target cpu:
 */

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	if (cmdline_find_option_bool(cmdline, option))
		*res = true;

	return *res;
}

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (!strcmp(name, b_fw->name)) {
			cd->size = b_fw->size;
			cd->data = b_fw->data;
			return true;
		}
	}
#endif
	return false;
}

void __init load_ucode_bsp(void)
{
	int vendor;
	unsigned int family;

	if (check_loader_disabled_bsp())
		return;

	if (!have_cpuid_p())
		return;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			load_ucode_intel_bsp();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			load_ucode_amd_bsp(family);
		break;
	default:
		break;
	}
}

static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}

void load_ucode_ap(void)
{
	int vendor, family;

	if (check_loader_disabled_ap())
		return;

	if (!have_cpuid_p())
		return;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			load_ucode_amd_ap();
		break;
	default:
		break;
	}
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			return save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			return save_microcode_in_initrd_amd();
		break;
	default:
		break;
	}

	return -EINVAL;
}

void reload_early_microcode(void)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd();
		break;
	default:
		break;
	}
}
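
/*
 * The *_on_target() helpers below hand the vendor-specific callback to
 * smp_call_function_single() so that it runs on the target CPU itself;
 * the result is passed back through the small ->err context structures.
 */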

static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
	struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

static int collect_cpu_info(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	int ret;

	memset(uci, 0, sizeof(*uci));

	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
	if (!ret)
		uci->valid = 1;

	return ret;
}

struct apply_microcode_ctx {
	int err;
};

static void apply_microcode_local(void *arg)
{
	struct apply_microcode_ctx *ctx = arg;

	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
	struct apply_microcode_ctx ctx = { .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_OK)
			apply_microcode_on_target(cpu);
	}

	return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
	return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	ssize_t ret = -EINVAL;

	if ((len >> PAGE_SHIFT) > totalram_pages) {
		pr_err("too much data (max %ld pages)\n", totalram_pages);
		return ret;
	}

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;

	if (ret > 0)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	return ret;
}

static const struct file_operations microcode_fops = {
	.owner		= THIS_MODULE,
	.write		= microcode_write,
	.open		= microcode_open,
	.llseek		= no_llseek,
};

static struct miscdevice microcode_dev = {
	.minor		= MICROCODE_MINOR,
	.name		= "microcode",
	.nodename	= "cpu/microcode",
	.fops		= &microcode_fops,
};

static int __init microcode_dev_init(void)
{
	int error;

	error = misc_register(&microcode_dev);
	if (error) {
		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
		return error;
	}

	return 0;
}

static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif

/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

static int reload_for_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	enum ucode_state ustate;
	int err = 0;

	if (!uci->valid)
		return err;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
	if (ustate == UCODE_OK)
		apply_microcode_on_target(cpu);
	else if (ustate == UCODE_ERROR)
		err = -EINVAL;
	return err;
}
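
/*
 * reload_store() backs the "reload" sysfs attribute registered in
 * microcode_init() below. Usage sketch (shell command is illustrative,
 * not part of this file); writing anything other than "1" is ignored:
 *
 *	echo 1 > /sys/devices/system/cpu/microcode/reload
 */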

static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	int cpu;
	ssize_t ret = 0, tmp_ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	get_online_cpus();
	mutex_lock(&microcode_mutex);
	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		/* save retval of the first encountered reload error */
		if (!ret)
			ret = tmp_ret;
	}
	if (!ret)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (!ret)
		ret = size;

	return ret;
}

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};

static void microcode_fini_cpu(int cpu)
{
	microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_resume_cpu(int cpu)
{
	pr_debug("CPU%d updated upon resume\n", cpu);

	if (apply_microcode_on_target(cpu))
		return UCODE_ERROR;

	return UCODE_OK;
}
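
/*
 * Note on microcode_init_cpu() below: it bails out with UCODE_NFOUND before
 * system_state reaches SYSTEM_RUNNING, presumably because the firmware
 * loader cannot be relied upon that early in boot.
 */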

static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* --dimm. Trigger a delayed update? */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
						     refresh_fw);

	if (ustate == UCODE_OK) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}

static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
	int err, cpu = dev->id;

	if (!cpu_online(cpu))
		return 0;

	pr_debug("CPU%d added\n", cpu);

	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
	if (err)
		return err;

	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
		return -EINVAL;

	return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
	.name		= "microcode",
	.subsys		= &cpu_subsys,
	.add_dev	= mc_device_add,
	.remove_dev	= mc_device_remove,
};

/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
	.resume		= mc_bp_resume,
};

static int mc_cpu_online(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	microcode_update_cpu(cpu);
	pr_debug("CPU%d added\n", cpu);

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	/* Suspend is in progress, only remove the interface */
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	pr_debug("CPU%d removed\n", cpu);
	/*
	 * When a CPU goes offline, don't free up or invalidate the copy of
	 * the microcode in kernel memory, so that we can reuse it when the
	 * CPU comes back online without unnecessarily requesting it from
	 * userspace again.
	 */
	return 0;
}
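
/*
 * Resulting sysfs layout (illustrative):
 *
 *	/sys/devices/system/cpu/microcode/reload		<- cpu_root_microcode_group
 *	/sys/devices/system/cpu/cpuN/microcode/version		<- mc_attr_group, per CPU
 *	/sys/devices/system/cpu/cpuN/microcode/processor_flags	<- mc_attr_group, per CPU
 */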

static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static struct attribute_group cpu_root_microcode_group = {
	.name	= "microcode",
	.attrs	= cpu_root_microcode_attrs,
};

int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1,
							 NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (error)
		goto out_pdev;

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				   &cpu_root_microcode_group);

	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_driver;
	}

	error = microcode_dev_init();
	if (error)
		goto out_ucode_group;

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
				  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v" MICROCODE_VERSION
		" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");

	return 0;

 out_ucode_group:
	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

 out_driver:
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;
}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);