/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *               2006 Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 *#define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
static struct mc_saved_data {
        unsigned int mc_saved_count;
        struct microcode_intel **mc_saved;
} mc_saved_data;

static enum ucode_state
load_microcode_early(struct microcode_intel **saved,
                     unsigned int num_saved, struct ucode_cpu_info *uci)
{
        struct microcode_intel *ucode_ptr, *new_mc = NULL;
        struct microcode_header_intel *mc_hdr;
        int new_rev, ret, i;

        new_rev = uci->cpu_sig.rev;

        for (i = 0; i < num_saved; i++) {
                ucode_ptr = saved[i];
                mc_hdr = (struct microcode_header_intel *)ucode_ptr;

                ret = has_newer_microcode(ucode_ptr,
                                          uci->cpu_sig.sig,
                                          uci->cpu_sig.pf,
                                          new_rev);
                if (!ret)
                        continue;

                new_rev = mc_hdr->rev;
                new_mc  = ucode_ptr;
        }

        if (!new_mc)
                return UCODE_NFOUND;

        uci->mc = (struct microcode_intel *)new_mc;
        return UCODE_OK;
}

static inline void
copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
                 unsigned long off, int num_saved)
{
        int i;

        for (i = 0; i < num_saved; i++)
                mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
}

#ifdef CONFIG_X86_32
static void
microcode_phys(struct microcode_intel **mc_saved_tmp,
               struct mc_saved_data *mc_saved_data)
{
        int i;
        struct microcode_intel ***mc_saved;

        mc_saved = (struct microcode_intel ***)
                   __pa_nodebug(&mc_saved_data->mc_saved);
        for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
                struct microcode_intel *p;

                p = *(struct microcode_intel **)
                    __pa_nodebug(mc_saved_data->mc_saved + i);
                mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
        }
}
#endif
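
/*
 * Note: load_microcode() below consumes the saved patches from one of two
 * places: as offsets into the initrd (before save_microcode() has made
 * kmalloc'ed copies of them) or from mc_saved_data.mc_saved. On 32-bit the
 * early loader runs on physical addresses, which is why microcode_phys()
 * above translates the saved virtual pointers with __pa_nodebug().
 */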

static enum ucode_state
load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
               unsigned long initrd_start, struct ucode_cpu_info *uci)
{
        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int count = mc_saved_data->mc_saved_count;

        if (!mc_saved_data->mc_saved) {
                copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);

                return load_microcode_early(mc_saved_tmp, count, uci);
        } else {
#ifdef CONFIG_X86_32
                microcode_phys(mc_saved_tmp, mc_saved_data);
                return load_microcode_early(mc_saved_tmp, count, uci);
#else
                return load_microcode_early(mc_saved_data->mc_saved,
                                            count, uci);
#endif
        }
}

/*
 * Given a CPU signature and a microcode patch, this function checks whether
 * the microcode patch has the same family and model as the CPU.
 */
static enum ucode_state
matching_model_microcode(struct microcode_header_intel *mc_header,
                         unsigned long sig)
{
        unsigned int fam, model;
        unsigned int fam_ucode, model_ucode;
        struct extended_sigtable *ext_header;
        unsigned long total_size = get_totalsize(mc_header);
        unsigned long data_size = get_datasize(mc_header);
        int ext_sigcount, i;
        struct extended_signature *ext_sig;

        fam   = __x86_family(sig);
        model = x86_model(sig);

        fam_ucode   = __x86_family(mc_header->sig);
        model_ucode = x86_model(mc_header->sig);

        if (fam == fam_ucode && model == model_ucode)
                return UCODE_OK;

        /* Look for ext. headers: */
        if (total_size <= data_size + MC_HEADER_SIZE)
                return UCODE_NFOUND;

        ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
        ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
        ext_sigcount = ext_header->count;

        for (i = 0; i < ext_sigcount; i++) {
                fam_ucode   = __x86_family(ext_sig->sig);
                model_ucode = x86_model(ext_sig->sig);

                if (fam == fam_ucode && model == model_ucode)
                        return UCODE_OK;

                ext_sig++;
        }
        return UCODE_NFOUND;
}

static int
save_microcode(struct mc_saved_data *mc_saved_data,
               struct microcode_intel **mc_saved_src,
               unsigned int mc_saved_count)
{
        int i, j;
        struct microcode_intel **saved_ptr;
        int ret;

        if (!mc_saved_count)
                return -EINVAL;

        /*
         * Copy new microcode data.
         */
        saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
        if (!saved_ptr)
                return -ENOMEM;

        for (i = 0; i < mc_saved_count; i++) {
                struct microcode_header_intel *mc_hdr;
                struct microcode_intel *mc;
                unsigned long size;

                if (!mc_saved_src[i]) {
                        ret = -EINVAL;
                        goto err;
                }

                mc     = mc_saved_src[i];
                mc_hdr = &mc->hdr;
                size   = get_totalsize(mc_hdr);

                saved_ptr[i] = kmalloc(size, GFP_KERNEL);
                if (!saved_ptr[i]) {
                        ret = -ENOMEM;
                        goto err;
                }

                memcpy(saved_ptr[i], mc, size);
        }

        /*
         * Point to newly saved microcode.
         */
        mc_saved_data->mc_saved       = saved_ptr;
        mc_saved_data->mc_saved_count = mc_saved_count;

        return 0;

err:
        for (j = 0; j <= i; j++)
                kfree(saved_ptr[j]);
        kfree(saved_ptr);

        return ret;
}
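
/*
 * Note: save_microcode() above copies every patch into kmalloc'ed memory
 * instead of keeping pointers into the initrd, so the saved patches remain
 * usable after the initrd memory is freed later in boot.
 */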

/*
 * A microcode patch in ucode_ptr is saved into mc_saved
 * - if it has a matching signature and a newer revision than an existing
 *   patch in mc_saved,
 * - or if it is a newly discovered microcode patch.
 *
 * The microcode patch should have a matching model with the CPU.
 *
 * Returns: The updated number @num_saved of saved microcode patches.
 */
static unsigned int _save_mc(struct microcode_intel **mc_saved,
                             u8 *ucode_ptr, unsigned int num_saved)
{
        struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
        unsigned int sig, pf;
        int found = 0, i;

        mc_hdr = (struct microcode_header_intel *)ucode_ptr;

        for (i = 0; i < num_saved; i++) {
                mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
                sig = mc_saved_hdr->sig;
                pf  = mc_saved_hdr->pf;

                if (!find_matching_signature(ucode_ptr, sig, pf))
                        continue;

                found = 1;

                if (mc_hdr->rev <= mc_saved_hdr->rev)
                        continue;

                /*
                 * Found an older ucode saved earlier. Replace it with
                 * this newer one.
                 */
                mc_saved[i] = (struct microcode_intel *)ucode_ptr;
                break;
        }

        /* Newly detected microcode, save it to memory. */
        if (i >= num_saved && !found)
                mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;

        return num_saved;
}
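
/*
 * Worked example for _save_mc() (the revision numbers are made up): if
 * mc_saved[] already holds a patch for the same signature/pf at revision
 * 0x1c and @ucode_ptr carries revision 0x1e, the existing slot is replaced
 * in place; if no saved patch matches the signature, the new patch is
 * appended and @num_saved grows by one.
 */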

/*
 * Get microcode matching with the BSP's model. Only CPUs with the same model
 * as the BSP can stay in the platform.
 */
static enum ucode_state __init
get_matching_model_microcode(int cpu, unsigned long start,
                             void *data, size_t size,
                             struct mc_saved_data *mc_saved_data,
                             unsigned long *mc_saved_in_initrd,
                             struct ucode_cpu_info *uci)
{
        u8 *ucode_ptr = data;
        unsigned int leftover = size;
        enum ucode_state state = UCODE_OK;
        unsigned int mc_size;
        struct microcode_header_intel *mc_header;
        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
        int i;

        while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {

                if (leftover < sizeof(mc_header))
                        break;

                mc_header = (struct microcode_header_intel *)ucode_ptr;

                mc_size = get_totalsize(mc_header);
                if (!mc_size || mc_size > leftover ||
                    microcode_sanity_check(ucode_ptr, 0) < 0)
                        break;

                leftover -= mc_size;

                /*
                 * Since APs with same family and model as the BSP may boot in
                 * the platform, we need to find and save microcode patches
                 * with the same family and model as the BSP.
                 */
                if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
                    UCODE_OK) {
                        ucode_ptr += mc_size;
                        continue;
                }

                mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);

                ucode_ptr += mc_size;
        }

        if (leftover) {
                state = UCODE_ERROR;
                goto out;
        }

        if (mc_saved_count == 0) {
                state = UCODE_NFOUND;
                goto out;
        }

        for (i = 0; i < mc_saved_count; i++)
                mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;

        mc_saved_data->mc_saved_count = mc_saved_count;
out:
        return state;
}

static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
        unsigned int val[2];
        unsigned int family, model;
        struct cpu_signature csig;
        unsigned int eax, ebx, ecx, edx;

        csig.sig = 0;
        csig.pf = 0;
        csig.rev = 0;

        memset(uci, 0, sizeof(*uci));

        eax = 0x00000001;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);
        csig.sig = eax;

        family = __x86_family(csig.sig);
        model  = x86_model(csig.sig);

        if ((model >= 5) || (family > 6)) {
                /* get processor flags from MSR 0x17 */
                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig.pf = 1 << ((val[1] >> 18) & 7);
        }
        native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);

        /* As documented in the SDM: Do a CPUID 1 here */
        sync_core();

        /* get the current revision from MSR 0x8B */
        native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

        csig.rev = val[1];

        uci->cpu_sig = csig;
        uci->valid = 1;

        return 0;
}

static void show_saved_mc(void)
{
#ifdef DEBUG
        int i, j;
        unsigned int sig, pf, rev, total_size, data_size, date;
        struct ucode_cpu_info uci;

        if (mc_saved_data.mc_saved_count == 0) {
                pr_debug("no microcode data saved.\n");
                return;
        }
        pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);

        collect_cpu_info_early(&uci);

        sig = uci.cpu_sig.sig;
        pf  = uci.cpu_sig.pf;
        rev = uci.cpu_sig.rev;
        pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

        for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
                struct microcode_header_intel *mc_saved_header;
                struct extended_sigtable *ext_header;
                int ext_sigcount;
                struct extended_signature *ext_sig;

                mc_saved_header = (struct microcode_header_intel *)
                                  mc_saved_data.mc_saved[i];
                sig = mc_saved_header->sig;
                pf  = mc_saved_header->pf;
                rev = mc_saved_header->rev;
                total_size = get_totalsize(mc_saved_header);
                data_size  = get_datasize(mc_saved_header);
                date = mc_saved_header->date;

                pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
                         i, sig, pf, rev, total_size,
                         date & 0xffff,
                         date >> 24,
                         (date >> 16) & 0xff);

                /* Look for ext. headers: */
                if (total_size <= data_size + MC_HEADER_SIZE)
                        continue;

                ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
                ext_sigcount = ext_header->count;
                ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

                for (j = 0; j < ext_sigcount; j++) {
                        sig = ext_sig->sig;
                        pf  = ext_sig->pf;

                        pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
                                 j, sig, pf);

                        ext_sig++;
                }
        }
#endif
}
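
/*
 * The header date printed by show_saved_mc() above and print_ucode_info()
 * below is BCD-encoded as MMDDYYYY: month in bits 31:24, day in bits 23:16,
 * year in bits 15:0. A value of 0x04102015, for example, comes out of the
 * "%04x-%02x-%02x" format as 2015-04-10.
 */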

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
/*
 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
 * hot added or resumes.
 *
 * Please make sure this mc is a valid microcode patch before calling
 * this function.
 */
int save_mc_for_early(u8 *mc)
{
        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int mc_saved_count_init;
        unsigned int mc_saved_count;
        struct microcode_intel **mc_saved;
        int ret = 0;
        int i;

        /*
         * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
         * hotplug.
         */
        mutex_lock(&x86_cpu_microcode_mutex);

        mc_saved_count_init = mc_saved_data.mc_saved_count;
        mc_saved_count = mc_saved_data.mc_saved_count;
        mc_saved = mc_saved_data.mc_saved;

        if (mc_saved && mc_saved_count)
                memcpy(mc_saved_tmp, mc_saved,
                       mc_saved_count * sizeof(struct microcode_intel *));
        /*
         * Save the microcode patch mc in mc_saved_tmp structure if it's a
         * newer version.
         */
        mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);

        /*
         * Save the mc_saved_tmp in global mc_saved_data.
         */
        ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
        if (ret) {
                pr_err("Cannot save microcode patch.\n");
                goto out;
        }

        show_saved_mc();

        /*
         * Free old saved microcode data.
         */
        if (mc_saved) {
                for (i = 0; i < mc_saved_count_init; i++)
                        kfree(mc_saved[i]);
                kfree(mc_saved);
        }

out:
        mutex_unlock(&x86_cpu_microcode_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(save_mc_for_early);
#endif

static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
{
#ifdef CONFIG_X86_64
        unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
        unsigned int family, model, stepping;
        char name[30];

        native_cpuid(&eax, &ebx, &ecx, &edx);

        family   = __x86_family(eax);
        model    = x86_model(eax);
        stepping = eax & 0xf;

        sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);

        return get_builtin_firmware(cp, name);
#else
        return false;
#endif
}
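
/*
 * The early scan below first looks for the fixed cpio path ucode_name inside
 * the initrd image and, only if nothing is found there, falls back to
 * microcode built into the kernel image (64-bit only, see
 * load_builtin_intel_microcode() above).
 */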

static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
static __init enum ucode_state
scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
               unsigned long start, unsigned long size,
               struct ucode_cpu_info *uci)
{
        struct cpio_data cd;
        long offset = 0;
#ifdef CONFIG_X86_32
        char *p = (char *)__pa_nodebug(ucode_name);
#else
        char *p = ucode_name;
#endif

        cd.data = NULL;
        cd.size = 0;

        cd = find_cpio_data(p, (void *)start, size, &offset);
        if (!cd.data) {
                if (!load_builtin_intel_microcode(&cd))
                        return UCODE_ERROR;
        }

        return get_matching_model_microcode(0, start, cd.data, cd.size,
                                            mc_saved_data, initrd, uci);
}

/*
 * Print ucode update info.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
        int cpu = smp_processor_id();

        pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
                cpu,
                uci->cpu_sig.rev,
                date & 0xffff,
                date >> 24,
                (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
        struct ucode_cpu_info uci;

        if (delay_ucode_info) {
                collect_cpu_info_early(&uci);
                print_ucode_info(&uci, current_mc_date);
                delay_ucode_info = 0;
        }
}

/*
 * At this point, we cannot call printk() yet. Keep the microcode patch number
 * in mc_saved_data.mc_saved and delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
        struct microcode_intel *mc_intel;
        int *delay_ucode_info_p;
        int *current_mc_date_p;

        mc_intel = uci->mc;
        if (mc_intel == NULL)
                return;

        delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
        current_mc_date_p  = (int *)__pa_nodebug(&current_mc_date);

        *delay_ucode_info_p = 1;
        *current_mc_date_p  = mc_intel->hdr.date;
}
#else

/*
 * Flush the global TLB. We only do this on x86_64, where paging has been
 * enabled already and PGE should be enabled as well.
 */
static inline void flush_tlb_early(void)
{
        __native_flush_tlb_global_irq_disabled();
}

static inline void print_ucode(struct ucode_cpu_info *uci)
{
        struct microcode_intel *mc_intel;

        mc_intel = uci->mc;
        if (mc_intel == NULL)
                return;

        print_ucode_info(uci, mc_intel->hdr.date);
}
#endif

static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
        struct microcode_intel *mc_intel;
        unsigned int val[2];

        mc_intel = uci->mc;
        if (mc_intel == NULL)
                return 0;

        /* write microcode via MSR 0x79 */
        native_wrmsr(MSR_IA32_UCODE_WRITE,
                     (unsigned long) mc_intel->bits,
                     (unsigned long) mc_intel->bits >> 16 >> 16);
        native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);

        /* As documented in the SDM: Do a CPUID 1 here */
        sync_core();

        /* get the current revision from MSR 0x8B */
        native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
        if (val[1] != mc_intel->hdr.rev)
                return -1;

#ifdef CONFIG_X86_64
        /* Flush the global TLB. This is a precaution. */
        flush_tlb_early();
#endif
        uci->cpu_sig.rev = val[1];

        if (early)
                print_ucode(uci);
        else
                print_ucode_info(uci, mc_intel->hdr.date);

        return 0;
}
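
/*
 * Note on apply_microcode_early(): MSR_IA32_UCODE_WRITE (0x79) is written
 * with the linear address of the patch data (mc_intel->bits); the low and
 * high 32 bits are passed separately, and writing the high half as
 * ">> 16 >> 16" rather than ">> 32" keeps the shift well-defined when
 * unsigned long is 32 bits wide. The update is then verified by re-reading
 * the revision from MSR_IA32_UCODE_REV (0x8B).
 */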

/*
 * This function converts microcode patch offsets previously stored in
 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
 */
int __init save_microcode_in_initrd_intel(void)
{
        unsigned int count = mc_saved_data.mc_saved_count;
        struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
        int ret = 0;

        if (count == 0)
                return ret;

        copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
        ret = save_microcode(&mc_saved_data, mc_saved, count);
        if (ret)
                pr_err("Cannot save microcode patches from initrd.\n");

        show_saved_mc();

        return ret;
}

static void __init
_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
                      unsigned long *initrd,
                      unsigned long start, unsigned long size)
{
        struct ucode_cpu_info uci;
        enum ucode_state ret;

        collect_cpu_info_early(&uci);

        ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
        if (ret != UCODE_OK)
                return;

        ret = load_microcode(mc_saved_data, initrd, start, &uci);
        if (ret != UCODE_OK)
                return;

        apply_microcode_early(&uci, true);
}

void __init load_ucode_intel_bsp(void)
{
        u64 start, size;
#ifdef CONFIG_X86_32
        struct boot_params *p;

        p     = (struct boot_params *)__pa_nodebug(&boot_params);
        start = p->hdr.ramdisk_image;
        size  = p->hdr.ramdisk_size;

        _load_ucode_intel_bsp(
                (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
                (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
                start, size);
#else
        start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
        size  = boot_params.hdr.ramdisk_size;

        _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
}
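
/*
 * Early AP path: the APs reuse whatever the BSP saved in mc_saved_data and
 * mc_saved_in_initrd, so no firmware or filesystem access is needed here.
 * As in the BSP path, the 32-bit variant still runs on physical addresses,
 * hence the __pa_nodebug() translations below.
 */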

void load_ucode_intel_ap(void)
{
        struct mc_saved_data *mc_saved_data_p;
        struct ucode_cpu_info uci;
        unsigned long *mc_saved_in_initrd_p;
        unsigned long initrd_start_addr;
        enum ucode_state ret;
#ifdef CONFIG_X86_32
        unsigned long *initrd_start_p;

        mc_saved_in_initrd_p =
                (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
        mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
        initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
        initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
        mc_saved_data_p = &mc_saved_data;
        mc_saved_in_initrd_p = mc_saved_in_initrd;
        initrd_start_addr = initrd_start;
#endif

        /*
         * If there is no valid ucode previously saved in memory, no need to
         * update ucode on this AP.
         */
        if (mc_saved_data_p->mc_saved_count == 0)
                return;

        collect_cpu_info_early(&uci);
        ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
                             initrd_start_addr, &uci);

        if (ret != UCODE_OK)
                return;

        apply_microcode_early(&uci, true);
}

void reload_ucode_intel(void)
{
        struct ucode_cpu_info uci;
        enum ucode_state ret;

        if (!mc_saved_data.mc_saved_count)
                return;

        collect_cpu_info_early(&uci);

        ret = load_microcode_early(mc_saved_data.mc_saved,
                                   mc_saved_data.mc_saved_count, &uci);
        if (ret != UCODE_OK)
                return;

        apply_microcode_early(&uci, false);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu_num);
        unsigned int val[2];

        memset(csig, 0, sizeof(*csig));

        csig->sig = cpuid_eax(0x00000001);

        if ((c->x86_model >= 5) || (c->x86 > 6)) {
                /* get processor flags from MSR 0x17 */
                rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig->pf = 1 << ((val[1] >> 18) & 7);
        }

        csig->rev = c->microcode;
        pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
                cpu_num, csig->sig, csig->pf, csig->rev);

        return 0;
}
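
/*
 * The processor-flags mask computed above (and in collect_cpu_info_early())
 * comes from MSR_IA32_PLATFORM_ID: bits 52:50 of the MSR - bits 20:18 of the
 * high word in val[1] - select a platform ID of 0-7, and the corresponding
 * bit is set in ->pf so it can be matched against the pf bitmask carried in
 * the microcode header.
 */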

/*
 * return 0 - no update found
 * return 1 - found update
 */
static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
{
        struct cpu_signature cpu_sig;
        unsigned int csig, cpf, crev;

        collect_cpu_info(cpu, &cpu_sig);

        csig = cpu_sig.sig;
        cpf  = cpu_sig.pf;
        crev = cpu_sig.rev;

        return has_newer_microcode(mc_intel, csig, cpf, crev);
}

static int apply_microcode_intel(int cpu)
{
        struct microcode_intel *mc_intel;
        struct ucode_cpu_info *uci;
        unsigned int val[2];
        int cpu_num = raw_smp_processor_id();
        struct cpuinfo_x86 *c = &cpu_data(cpu_num);

        uci = ucode_cpu_info + cpu;
        mc_intel = uci->mc;

        /* We should bind the task to the CPU */
        BUG_ON(cpu_num != cpu);

        if (mc_intel == NULL)
                return 0;

        /*
         * Microcode on this CPU could be updated earlier. Only apply the
         * microcode patch in mc_intel when it is newer than the one on this
         * CPU.
         */
        if (get_matching_mc(mc_intel, cpu) == 0)
                return 0;

        /* write microcode via MSR 0x79 */
        wrmsr(MSR_IA32_UCODE_WRITE,
              (unsigned long) mc_intel->bits,
              (unsigned long) mc_intel->bits >> 16 >> 16);
        wrmsr(MSR_IA32_UCODE_REV, 0, 0);

        /* As documented in the SDM: Do a CPUID 1 here */
        sync_core();

        /* get the current revision from MSR 0x8B */
        rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);

        if (val[1] != mc_intel->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
                       cpu_num, mc_intel->hdr.rev);
                return -1;
        }
        pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
                cpu_num, val[1],
                mc_intel->hdr.date & 0xffff,
                mc_intel->hdr.date >> 24,
                (mc_intel->hdr.date >> 16) & 0xff);

        uci->cpu_sig.rev = val[1];
        c->microcode = val[1];

        return 0;
}

static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                                int (*get_ucode_data)(void *, const void *, size_t))
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
        int new_rev = uci->cpu_sig.rev;
        unsigned int leftover = size;
        enum ucode_state state = UCODE_OK;
        unsigned int curr_mc_size = 0;
        unsigned int csig, cpf;

        while (leftover) {
                struct microcode_header_intel mc_header;
                unsigned int mc_size;

                if (leftover < sizeof(mc_header)) {
                        pr_err("error! Truncated header in microcode data file\n");
                        break;
                }

                if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
                        break;

                mc_size = get_totalsize(&mc_header);
                if (!mc_size || mc_size > leftover) {
                        pr_err("error! Bad data in microcode data file\n");
                        break;
                }

                /* For performance reasons, reuse mc area when possible */
                if (!mc || mc_size > curr_mc_size) {
                        vfree(mc);
                        mc = vmalloc(mc_size);
                        if (!mc)
                                break;
                        curr_mc_size = mc_size;
                }

                if (get_ucode_data(mc, ucode_ptr, mc_size) ||
                    microcode_sanity_check(mc, 1) < 0) {
                        break;
                }

                csig = uci->cpu_sig.sig;
                cpf  = uci->cpu_sig.pf;
                if (has_newer_microcode(mc, csig, cpf, new_rev)) {
                        vfree(new_mc);
                        new_rev = mc_header.rev;
                        new_mc  = mc;
                        mc = NULL;      /* trigger new vmalloc */
                }

                ucode_ptr += mc_size;
                leftover  -= mc_size;
        }

        vfree(mc);

        if (leftover) {
                vfree(new_mc);
                state = UCODE_ERROR;
                goto out;
        }

        if (!new_mc) {
                state = UCODE_NFOUND;
                goto out;
        }

        vfree(uci->mc);
        uci->mc = (struct microcode_intel *)new_mc;

        /*
         * If early loading microcode is supported, save this mc into
         * permanent memory. So it will be loaded early when a CPU is hot added
         * or resumes.
         */
        save_mc_for_early(new_mc);

        pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
                 cpu, new_rev, uci->cpu_sig.rev);
out:
        return state;
}
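
/*
 * Note: generic_load_microcode() above keeps only the newest patch in the
 * blob that matches this CPU's signature and processor flags; the winner
 * replaces uci->mc and is also handed to save_mc_for_early() so it can be
 * re-applied early on CPU hotplug and on resume.
 */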

static int get_ucode_fw(void *to, const void *from, size_t n)
{
        memcpy(to, from, n);
        return 0;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device,
                                             bool refresh_fw)
{
        char name[30];
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        const struct firmware *firmware;
        enum ucode_state ret;

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                c->x86, c->x86_model, c->x86_mask);

        if (request_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);
                return UCODE_NFOUND;
        }

        ret = generic_load_microcode(cpu, (void *)firmware->data,
                                     firmware->size, &get_ucode_fw);

        release_firmware(firmware);

        return ret;
}

static int get_ucode_user(void *to, const void *from, size_t n)
{
        return copy_from_user(to, from, n);
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
        return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}

static void microcode_fini_cpu(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        vfree(uci->mc);
        uci->mc = NULL;
}

static struct microcode_ops microcode_intel_ops = {
        .request_microcode_user   = request_microcode_user,
        .request_microcode_fw     = request_microcode_fw,
        .collect_cpu_info         = collect_cpu_info,
        .apply_microcode          = apply_microcode_intel,
        .microcode_fini_cpu       = microcode_fini_cpu,
};

struct microcode_ops * __init init_intel_microcode(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
            cpu_has(c, X86_FEATURE_IA64)) {
                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
                return NULL;
        }

        return &microcode_intel_ops;
}