/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *               2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;

static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}

/*
 * Returns 1 if a matching signature has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}

/*
 * Returns 1 if an update newer than @new_rev has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}
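/*
 * Worked example of the matching rules above (illustrative values): a CPU
 * reporting platform ID 2 has pf = 1 << 2 = 0x04. A patch whose pf field
 * is 0x0c covers platforms 2 and 3 and therefore matches, since
 * 0x04 & 0x0c != 0; a patch with pf = 0x01 (platform 0 only) does not.
 */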
/*
 * Given a CPU signature and a microcode patch, this function checks whether
 * the microcode patch matches the family and model of that CPU.
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	fam   = x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}
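/*
 * For illustration: a CPUID(1).EAX signature of 0x000506e3 decodes via
 * x86_family()/x86_model() to family 6, model 0x5e (stepping 3). Note that
 * the check above deliberately ignores the stepping: any patch blob for
 * family 6/model 0x5e is considered a candidate here.
 */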
static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
	struct ucode_patch *p;

	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
	if (!p)
		return NULL;

	p->data = kmemdup(data, size, GFP_KERNEL);
	if (!p->data) {
		kfree(p);
		return NULL;
	}

	return p;
}

static void save_microcode_patch(void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig	     = mc_saved_hdr->sig;
		pf	     = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found one.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	if (!p)
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}

static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size  = get_datasize(mc_header);

	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
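/*
 * Note on the checksum scheme verified above: a valid update is built so
 * that all 32-bit words of header + data sum to 0 (mod 2^32). Each
 * extended signature entry carries a cksum chosen so that swapping its
 * sig/pf/cksum triple into the base header preserves that zero sum, which
 * is why comparing the two triple sums is sufficient.
 */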
/*
 * Get microcode matching the BSP's model. Only CPUs with the same model as
 * the BSP can stay in the platform.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}

		if (!patch) {
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	if (size)
		return NULL;

	return patch;
}

static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}

static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf  = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
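/*
 * The date field printed above is BCD-packed as mmddyyyy, e.g. the value
 * 0x04102019 is shown as 2019-04-10: month = date >> 24, day =
 * (date >> 16) & 0xff, year = date & 0xffff.
 */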
/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}

static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is a delayed
 * info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we cannot call printk() yet. Delay printing the microcode
 * info until printk() works, in show_ucode_info_early().
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif

static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}
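/*
 * For reference: MSR 0x79 (IA32_BIOS_UPDT_TRIG) is written with the linear
 * address of the update data, i.e. mc->bits just past the fixed header.
 * The new revision is not read back from that MSR; it only becomes visible
 * via intel_get_microcode_revision(), hence the re-read and compare above.
 */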
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}

/*
 * Returns a pointer to the patch we found, or NULL if none was found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path	= (const char *)__pa_nodebug(ucode_path);
		use_pa	= true;
	} else {
		path	= ucode_path;
		use_pa	= false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}

void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}

void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}
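/*
 * The reget loop above is the mixed-silicon fallback: the patch cached by
 * the BSP may not match this AP's signature, in which case applying fails,
 * the cached pointer is dropped and the builtin/initrd image is rescanned
 * for a patch that fits this AP.
 */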
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
	struct microcode_header_intel *phdr;
	struct ucode_patch *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

		phdr = (struct microcode_header_intel *)iter->data;

		if (phdr->rev <= uci->cpu_sig.rev)
			continue;

		if (!find_matching_signature(phdr,
					     uci->cpu_sig.sig,
					     uci->cpu_sig.pf))
			continue;

		return iter->data;
	}
	return NULL;
}

void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}

static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particularly expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode	 = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
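/*
 * The pf computation in collect_cpu_info() above extracts
 * MSR_IA32_PLATFORM_ID bits [52:50]: val[1] holds the high dword, so MSR
 * bit 50 is bit 18 of val[1], and the 3-bit platform ID is turned into the
 * single mask bit 1 << id used by the signature-matching code.
 */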
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc	= mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory so that it will be loaded early when a CPU is
	 * hot-added or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}
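/*
 * 2621440 in is_blacklisted() is 2.5MB expressed in bytes
 * (2.5 * 1024 * 1024), the per-core LLC threshold from erratum BDF90.
 */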
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
	ret = generic_load_microcode(cpu, &iter);

	release_firmware(firmware);

	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	struct iov_iter iter;
	struct iovec iov;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	iov.iov_base = (void __user *)buf;
	iov.iov_len = size;
	iov_iter_init(&iter, WRITE, &iov, 1, size);

	return generic_load_microcode(cpu, &iter);
}

static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		= request_microcode_user,
	.request_microcode_fw		= request_microcode_fw,
	.collect_cpu_info		= collect_cpu_info,
	.apply_microcode		= apply_microcode_intel,
};

static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}

struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}
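/*
 * Worked example for calc_llc_size_per_core(): x86_cache_size is in KB, so
 * a 10-core package with a 25600KB (25MB) LLC yields 25600 * 1024 / 10 =
 * 2621440 bytes per core, which is exactly the is_blacklisted() threshold.
 */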