// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		 2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;

static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}

static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
	struct ucode_patch *p;

	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return NULL;

	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
		return NULL;
	}

	return p;
}

static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig = mc_saved_hdr->sig;
		pf = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	if (!p)
		return;

	if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}
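
/*
 * Perform basic sanity checks on a microcode update image: header and loader
 * version, size fields, the extended signature table layout, and both the
 * data and extended-table checksums. Returns 0 if the image looks valid,
 * a negative error code otherwise.
 */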
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!find_matching_signature(data, uci->cpu_sig.sig,
					     uci->cpu_sig.pf)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(uci, data, mc_size);
			goto next;
		}

		if (!patch) {
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	if (size)
		return NULL;

	return patch;
}

static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		date = mc_saved_header->date;

		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}

/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(uci, mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}

static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif
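
/*
 * Apply the patch in @uci->mc on the current CPU. Returns 0 when the update
 * was written (or there was nothing to apply), UCODE_OK when an equal or
 * newer revision is already running, and -1 when the new revision did not
 * take effect.
 */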
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}

int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}

/*
 * Returns a pointer to the patch we found, or NULL.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}

void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}
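
/*
 * Search the patch cache for an update that is newer than the revision in
 * @uci and matches the CPU's signature and platform flags. Returns NULL when
 * nothing suitable is cached.
 */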
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

		phdr = (struct microcode_header_intel *)iter->data;

		if (phdr->rev <= uci->cpu_sig.rev)
			continue;

		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;

		return iter->data;
	}
	return NULL;
}

void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	if (bsp && rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (bsp)
		boot_cpu_data.microcode = rev;

	return ret;
}

static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(uci, new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}
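
/*
 * Late-loading entry point: request the "intel-ucode/ff-mm-ss" blob for this
 * CPU from the firmware loader and feed it to generic_load_microcode().
 */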
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
	ret = generic_load_microcode(cpu, &iter);

	release_firmware(firmware);

	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	struct iov_iter iter;
	struct iovec iov;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	iov.iov_base = (void __user *)buf;
	iov.iov_len = size;
	iov_iter_init(&iter, WRITE, &iov, 1, size);

	return generic_load_microcode(cpu, &iter);
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user = request_microcode_user,
	.request_microcode_fw = request_microcode_fw,
	.collect_cpu_info = collect_cpu_info,
	.apply_microcode = apply_microcode_intel,
};

static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}