// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *		 2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}
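/*
 * On-disk layout sketch (derived from the accessors used above; see
 * asm/microcode_intel.h for the authoritative definitions):
 *
 *	+---------------------------+ offset 0
 *	| microcode_header_intel    | MC_HEADER_SIZE bytes
 *	+---------------------------+
 *	| update payload            | get_datasize() bytes
 *	+---------------------------+
 *	| extended signature table  | optional: present only when
 *	|  (header + N signatures)  | totalsize > datasize + MC_HEADER_SIZE
 *	+---------------------------+ get_totalsize() bytes total
 */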
/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}

static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
	struct ucode_patch *p;

	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return NULL;

	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
		return NULL;
	}

	return p;
}
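/*
 * Cache maintenance sketch for save_microcode_patch() below: walk the
 * cached patches; if one matches the new patch's signature, replace it
 * when the new revision is higher, otherwise keep the cached one. If
 * nothing matched, append the new patch to the cache.
 */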
static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig = mc_saved_hdr->sig;
		pf = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	if (!p)
		return;

	if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}

static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE) ||
		    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
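/*
 * Checksum example (an illustration of the checks above): the cksum header
 * field is chosen so that the 32-bit sum of every dword in header + data
 * is 0 mod 2^32. The per-signature check relies on the same invariant:
 * swapping the primary header's (sig, pf, cksum) triple for that of an
 * extended signature must keep the total sum at 0, hence the delta
 * computed in microcode_sanity_check() must itself be 0.
 */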
/*
 * Get microcode matching the BSP's model. Only CPUs with the same model as
 * the BSP can stay in the platform.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!find_matching_signature(data, uci->cpu_sig.sig,
					     uci->cpu_sig.pf)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(uci, data, mc_size);
			goto next;
		}

		if (!patch) {
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	if (size)
		return NULL;

	return patch;
}

static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	intel_cpu_collect_info(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		date = mc_saved_header->date;

		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
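/*
 * Note: show_saved_mc() above compiles to an empty function unless DEBUG
 * is defined; see the comment above the pr_fmt() definition at the top of
 * this file.
 */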
/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(uci, mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}

static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	struct firmware fw;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	if (firmware_request_builtin(&fw, name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is the delayed
 * info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		intel_cpu_collect_info(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we cannot call printk() yet. Delay printing the microcode
 * info in show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif
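/*
 * The actual update protocol, shared by the early and late paths below:
 * write back and invalidate caches, write the linear address of the patch
 * payload into MSR_IA32_UCODE_WRITE (0x79), then read the resulting
 * revision back via intel_get_microcode_revision() and compare it with
 * the patch header.
 */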
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}

int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	intel_cpu_collect_info(&uci);

	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}

/*
 * Returns a pointer to the matching patch, or NULL if none was found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	intel_cpu_collect_info(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}

void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **)__pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}

static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

		phdr = (struct microcode_header_intel *)iter->data;

		if (phdr->rev <= uci->cpu_sig.rev)
			continue;

		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;

		return iter->data;
	}
	return NULL;
}

void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	intel_cpu_collect_info(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}
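/*
 * A note on the platform flags read below: the platform ID sits in bits
 * 52:50 of MSR_IA32_PLATFORM_ID, i.e. bits 20:18 of the high half that
 * rdmsr() returns, hence (val[1] >> 18) & 7. The matching code treats pf
 * as a bitmask, which is why the ID is converted with 1 << id.
 */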
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	if (bsp && rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (bsp)
		boot_cpu_data.microcode = rev;

	return ret;
}
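/*
 * generic_load_microcode() below walks a stream of concatenated update
 * blobs supplied through @iter, sanity-checks each one, and keeps the
 * update with the highest revision that matches this CPU's signature.
 */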
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(uci, new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
	ret = generic_load_microcode(cpu, &iter);

	release_firmware(firmware);

	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	struct iov_iter iter;
	struct iovec iov;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	iov.iov_base = (void __user *)buf;
	iov.iov_len = size;
	iov_iter_init(&iter, WRITE, &iov, 1, size);

	return generic_load_microcode(cpu, &iter);
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user	= request_microcode_user,
	.request_microcode_fw	= request_microcode_fw,
	.collect_cpu_info	= collect_cpu_info,
	.apply_microcode	= apply_microcode_intel,
};
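/*
 * Note for the helper below: x86_cache_size is in KB, hence the * 1024ULL
 * conversion to bytes; the 2.5MB threshold used by is_blacklisted()
 * corresponds to llc_size_per_core > 2621440 (2.5 * 1024 * 1024).
 */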
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}