// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *
 * Intel CPU microcode early update for Linux
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 */

/*
 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
 * printk calls into no_printk().
 *
 * #define DEBUG
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;

/*
 * Returns 1 if update has been found, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
        struct microcode_header_intel *mc_hdr = mc;

        if (mc_hdr->rev <= new_rev)
                return 0;

        return intel_find_matching_signature(mc, csig, cpf);
}

static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
        struct ucode_patch *p;

        p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
        if (!p)
                return NULL;

        p->data = kmemdup(data, size, GFP_KERNEL);
        if (!p->data) {
                kfree(p);
                return NULL;
        }

        return p;
}

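/*
 * Save @data into the patch cache: replace any cached patch that matches the
 * same signature but is older, or append it when no matching signature is
 * cached yet. If the saved patch also fits the current CPU, remember it in
 * intel_ucode_patch for early loading (as a physical address on 32-bit, since
 * the APs run without paging at that point).
 */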
static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
        struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
        struct ucode_patch *iter, *tmp, *p = NULL;
        bool prev_found = false;
        unsigned int sig, pf;

        mc_hdr = (struct microcode_header_intel *)data;

        list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
                mc_saved_hdr = (struct microcode_header_intel *)iter->data;
                sig = mc_saved_hdr->sig;
                pf = mc_saved_hdr->pf;

                if (intel_find_matching_signature(data, sig, pf)) {
                        prev_found = true;

                        if (mc_hdr->rev <= mc_saved_hdr->rev)
                                continue;

                        p = memdup_patch(data, size);
                        if (!p)
                                pr_err("Error allocating buffer %p\n", data);
                        else {
                                list_replace(&iter->plist, &p->plist);
                                kfree(iter->data);
                                kfree(iter);
                        }
                }
        }

        /*
         * There weren't any previous patches found in the list cache; save the
         * newly found.
         */
        if (!prev_found) {
                p = memdup_patch(data, size);
                if (!p)
                        pr_err("Error allocating buffer for %p\n", data);
                else
                        list_add_tail(&p->plist, &microcode_cache);
        }

        if (!p)
                return;

        if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
                return;

        /*
         * Save for early loading. On 32-bit, that needs to be a physical
         * address as the APs are running from physical addresses, before
         * paging has been enabled.
         */
        if (IS_ENABLED(CONFIG_X86_32))
                intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
        else
                intel_ucode_patch = p->data;
}

/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
        struct microcode_header_intel *mc_header;
        struct microcode_intel *patch = NULL;
        unsigned int mc_size;

        while (size) {
                if (size < sizeof(struct microcode_header_intel))
                        break;

                mc_header = (struct microcode_header_intel *)data;

                mc_size = get_totalsize(mc_header);
                if (!mc_size ||
                    mc_size > size ||
                    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
                        break;

                size -= mc_size;

                if (!intel_find_matching_signature(data, uci->cpu_sig.sig,
                                                   uci->cpu_sig.pf)) {
                        data += mc_size;
                        continue;
                }

                if (save) {
                        save_microcode_patch(uci, data, mc_size);
                        goto next;
                }

                if (!patch) {
                        if (!has_newer_microcode(data,
                                                 uci->cpu_sig.sig,
                                                 uci->cpu_sig.pf,
                                                 uci->cpu_sig.rev))
                                goto next;

                } else {
                        struct microcode_header_intel *phdr = &patch->hdr;

                        if (!has_newer_microcode(data,
                                                 phdr->sig,
                                                 phdr->pf,
                                                 phdr->rev))
                                goto next;
                }

                /* We have a newer patch, save it. */
                patch = data;

next:
                data += mc_size;
        }

        if (size)
                return NULL;

        return patch;
}

static void show_saved_mc(void)
{
#ifdef DEBUG
        int i = 0, j;
        unsigned int sig, pf, rev, total_size, data_size, date;
        struct ucode_cpu_info uci;
        struct ucode_patch *p;

        if (list_empty(&microcode_cache)) {
                pr_debug("no microcode data saved.\n");
                return;
        }

        intel_cpu_collect_info(&uci);

        sig = uci.cpu_sig.sig;
        pf = uci.cpu_sig.pf;
        rev = uci.cpu_sig.rev;
        pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

        list_for_each_entry(p, &microcode_cache, plist) {
                struct microcode_header_intel *mc_saved_header;
                struct extended_sigtable *ext_header;
                struct extended_signature *ext_sig;
                int ext_sigcount;

                mc_saved_header = (struct microcode_header_intel *)p->data;

                sig = mc_saved_header->sig;
                pf = mc_saved_header->pf;
                rev = mc_saved_header->rev;
                date = mc_saved_header->date;

                total_size = get_totalsize(mc_saved_header);
                data_size = get_datasize(mc_saved_header);

                pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
                         i++, sig, pf, rev, total_size,
                         date & 0xffff,
                         date >> 24,
                         (date >> 16) & 0xff);

                /* Look for ext. headers: */
                if (total_size <= data_size + MC_HEADER_SIZE)
                        continue;

                ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
                ext_sigcount = ext_header->count;
                ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

                for (j = 0; j < ext_sigcount; j++) {
                        sig = ext_sig->sig;
                        pf = ext_sig->pf;

                        pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
                                 j, sig, pf);

                        ext_sig++;
                }
        }
#endif
}

/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
        /* Synchronization during CPU hotplug. */
        static DEFINE_MUTEX(x86_cpu_microcode_mutex);

        mutex_lock(&x86_cpu_microcode_mutex);

        save_microcode_patch(uci, mc, size);
        show_saved_mc();

        mutex_unlock(&x86_cpu_microcode_mutex);
}

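/*
 * Look for a builtin microcode blob named "intel-ucode/<family>-<model>-<stepping>"
 * matching the boot CPU. Only used on 64-bit kernels; fills in @cp and returns
 * true when such a blob is compiled into the kernel.
 */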
static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
        unsigned int eax = 1, ebx, ecx = 0, edx;
        struct firmware fw;
        char name[30];

        if (IS_ENABLED(CONFIG_X86_32))
                return false;

        native_cpuid(&eax, &ebx, &ecx, &edx);

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                x86_family(eax), x86_model(eax), x86_stepping(eax));

        if (firmware_request_builtin(&fw, name)) {
                cp->size = fw.size;
                cp->data = (void *)fw.data;
                return true;
        }

        return false;
}

static void print_ucode_info(int old_rev, int new_rev, unsigned int date)
{
        pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
                     old_rev,
                     new_rev,
                     date & 0xffff,
                     date >> 24,
                     (date >> 16) & 0xff);
}

#ifdef CONFIG_X86_32

static int delay_ucode_info;
static int current_mc_date;
static int early_old_rev;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
        struct ucode_cpu_info uci;

        if (delay_ucode_info) {
                intel_cpu_collect_info(&uci);
                print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date);
                delay_ucode_info = 0;
        }
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(int old_rev, int new_rev, int date)
{
        int *delay_ucode_info_p;
        int *current_mc_date_p;
        int *early_old_rev_p;

        delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
        current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
        early_old_rev_p = (int *)__pa_nodebug(&early_old_rev);

        *delay_ucode_info_p = 1;
        *current_mc_date_p = date;
        *early_old_rev_p = old_rev;
}
#else

static inline void print_ucode(int old_rev, int new_rev, int date)
{
        print_ucode_info(old_rev, new_rev, date);
}
#endif

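/*
 * Write the patch in @uci->mc to the CPU via MSR 0x79 and verify that the
 * reported microcode revision changed accordingly. The expensive WRMSR is
 * skipped when the sibling hyperthread has already applied the update.
 */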
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
        struct microcode_intel *mc;
        u32 rev, old_rev;

        mc = uci->mc;
        if (!mc)
                return 0;

        /*
         * Save us the MSR write below - which is a particularly expensive
         * operation - when the other hyperthread has updated the microcode
         * already.
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc->hdr.rev) {
                uci->cpu_sig.rev = rev;
                return UCODE_OK;
        }

        old_rev = rev;

        /*
         * Writeback and invalidate caches before updating microcode to avoid
         * internal issues depending on what the microcode is updating.
         */
        native_wbinvd();

        /* write microcode via MSR 0x79 */
        native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

        rev = intel_get_microcode_revision();
        if (rev != mc->hdr.rev)
                return -1;

        uci->cpu_sig.rev = rev;

        if (early)
                print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date);
        else
                print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date);

        return 0;
}

int __init save_microcode_in_initrd_intel(void)
{
        struct ucode_cpu_info uci;
        struct cpio_data cp;

        /*
         * initrd is going away, clear patch ptr. We will scan the microcode one
         * last time before jettisoning and save a patch, if found. Then we will
         * update that pointer too, with a stable patch address to use when
         * resuming the cores.
         */
        intel_ucode_patch = NULL;

        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path, false);

        if (!(cp.data && cp.size))
                return 0;

        intel_cpu_collect_info(&uci);

        scan_microcode(cp.data, cp.size, &uci, true);

        show_saved_mc();

        return 0;
}

/*
 * Returns a pointer to the patch we found, or NULL if none was found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
        static const char *path;
        struct cpio_data cp;
        bool use_pa;

        if (IS_ENABLED(CONFIG_X86_32)) {
                path = (const char *)__pa_nodebug(ucode_path);
                use_pa = true;
        } else {
                path = ucode_path;
                use_pa = false;
        }

        /* try built-in microcode first */
        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(path, use_pa);

        if (!(cp.data && cp.size))
                return NULL;

        intel_cpu_collect_info(uci);

        return scan_microcode(cp.data, cp.size, uci, false);
}

void __init load_ucode_intel_bsp(void)
{
        struct microcode_intel *patch;
        struct ucode_cpu_info uci;

        patch = __load_ucode_intel(&uci);
        if (!patch)
                return;

        uci.mc = patch;

        apply_microcode_early(&uci, true);
}

void load_ucode_intel_ap(void)
{
        struct microcode_intel *patch, **iup;
        struct ucode_cpu_info uci;

        if (IS_ENABLED(CONFIG_X86_32))
                iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
        else
                iup = &intel_ucode_patch;

        if (!*iup) {
                patch = __load_ucode_intel(&uci);
                if (!patch)
                        return;

                *iup = patch;
        }

        uci.mc = *iup;

        apply_microcode_early(&uci, true);
}

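/*
 * Walk the patch cache and return the first cached patch that matches this
 * CPU's signature and is newer than its currently loaded revision.
 */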
static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
        struct microcode_header_intel *phdr;
        struct ucode_patch *iter, *tmp;

        list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {

                phdr = (struct microcode_header_intel *)iter->data;

                if (phdr->rev <= uci->cpu_sig.rev)
                        continue;

                if (!intel_find_matching_signature(phdr,
                                                   uci->cpu_sig.sig,
                                                   uci->cpu_sig.pf))
                        continue;

                return iter->data;
        }
        return NULL;
}

void reload_ucode_intel(void)
{
        struct microcode_intel *p;
        struct ucode_cpu_info uci;

        intel_cpu_collect_info(&uci);

        p = find_patch(&uci);
        if (!p)
                return;

        uci.mc = p;

        apply_microcode_early(&uci, false);
}

static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu_num);
        unsigned int val[2];

        memset(csig, 0, sizeof(*csig));

        csig->sig = cpuid_eax(0x00000001);

        if ((c->x86_model >= 5) || (c->x86 > 6)) {
                /* get processor flags from MSR 0x17 */
                rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig->pf = 1 << ((val[1] >> 18) & 7);
        }

        csig->rev = c->microcode;

        return 0;
}

static enum ucode_state apply_microcode_intel(int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
        struct microcode_intel *mc;
        enum ucode_state ret;
        static int prev_rev;
        u32 rev;

        /* We should bind the task to the CPU */
        if (WARN_ON(raw_smp_processor_id() != cpu))
                return UCODE_ERROR;

        /* Look for a newer patch in our cache: */
        mc = find_patch(uci);
        if (!mc) {
                mc = uci->mc;
                if (!mc)
                        return UCODE_NFOUND;
        }

        /*
         * Save us the MSR write below - which is a particularly expensive
         * operation - when the other hyperthread has updated the microcode
         * already.
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc->hdr.rev) {
                ret = UCODE_OK;
                goto out;
        }

        /*
         * Writeback and invalidate caches before updating microcode to avoid
         * internal issues depending on what the microcode is updating.
         */
        native_wbinvd();

        /* write microcode via MSR 0x79 */
        wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

        rev = intel_get_microcode_revision();

        if (rev != mc->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
                       cpu, mc->hdr.rev);
                return UCODE_ERROR;
        }

        if (bsp && rev != prev_rev) {
                pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
                        rev,
                        mc->hdr.date & 0xffff,
                        mc->hdr.date >> 24,
                        (mc->hdr.date >> 16) & 0xff);
                prev_rev = rev;
        }

        ret = UCODE_UPDATED;

out:
        uci->cpu_sig.rev = rev;
        c->microcode = rev;

        /* Update boot_cpu_data's revision too, if we're on the BSP: */
        if (bsp)
                boot_cpu_data.microcode = rev;

        return ret;
}

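/*
 * Parse a microcode container from @iter: sanity-check each embedded patch
 * and keep the newest one matching this CPU. The winner ends up in @uci->mc
 * and is also saved to the patch cache for early loading on hotplug/resume.
 */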
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        unsigned int curr_mc_size = 0, new_mc_size = 0;
        enum ucode_state ret = UCODE_OK;
        int new_rev = uci->cpu_sig.rev;
        u8 *new_mc = NULL, *mc = NULL;
        unsigned int csig, cpf;

        while (iov_iter_count(iter)) {
                struct microcode_header_intel mc_header;
                unsigned int mc_size, data_size;
                u8 *data;

                if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
                        pr_err("error! Truncated or inaccessible header in microcode data file\n");
                        break;
                }

                mc_size = get_totalsize(&mc_header);
                if (mc_size < sizeof(mc_header)) {
                        pr_err("error! Bad data in microcode data file (totalsize too small)\n");
                        break;
                }
                data_size = mc_size - sizeof(mc_header);
                if (data_size > iov_iter_count(iter)) {
                        pr_err("error! Bad data in microcode data file (truncated file?)\n");
                        break;
                }

                /* For performance reasons, reuse mc area when possible */
                if (!mc || mc_size > curr_mc_size) {
                        vfree(mc);
                        mc = vmalloc(mc_size);
                        if (!mc)
                                break;
                        curr_mc_size = mc_size;
                }

                memcpy(mc, &mc_header, sizeof(mc_header));
                data = mc + sizeof(mc_header);
                if (!copy_from_iter_full(data, data_size, iter) ||
                    intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
                        break;
                }

                csig = uci->cpu_sig.sig;
                cpf = uci->cpu_sig.pf;
                if (has_newer_microcode(mc, csig, cpf, new_rev)) {
                        vfree(new_mc);
                        new_rev = mc_header.rev;
                        new_mc = mc;
                        new_mc_size = mc_size;
                        mc = NULL; /* trigger new vmalloc */
                        ret = UCODE_NEW;
                }
        }

        vfree(mc);

        if (iov_iter_count(iter)) {
                vfree(new_mc);
                return UCODE_ERROR;
        }

        if (!new_mc)
                return UCODE_NFOUND;

        vfree(uci->mc);
        uci->mc = (struct microcode_intel *)new_mc;

        /*
         * If early loading of microcode is supported, save this mc into
         * permanent memory so that it will be loaded early when a CPU is
         * hot-added or resumes.
         */
        save_mc_for_early(uci, new_mc, new_mc_size);

        pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
                 cpu, new_rev, uci->cpu_sig.rev);

        return ret;
}

static bool is_blacklisted(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        /*
         * Late loading on model 79 with microcode revision less than 0x0b000021
         * and LLC size per core bigger than 2.5MB may result in a system hang.
         * This behavior is documented in item BDF90, #334165 (Intel Xeon
         * Processor E7-8800/4800 v4 Product Family).
         */
        if (c->x86 == 6 &&
            c->x86_model == INTEL_FAM6_BROADWELL_X &&
            c->x86_stepping == 0x01 &&
            llc_size_per_core > 2621440 &&
            c->microcode < 0x0b000021) {
                pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
                pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
                return true;
        }

        return false;
}

static enum ucode_state request_microcode_fw(int cpu, struct device *device)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        const struct firmware *firmware;
        struct iov_iter iter;
        enum ucode_state ret;
        struct kvec kvec;
        char name[30];

        if (is_blacklisted(cpu))
                return UCODE_NFOUND;

        sprintf(name, "intel-ucode/%02x-%02x-%02x",
                c->x86, c->x86_model, c->x86_stepping);

        if (request_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);
                return UCODE_NFOUND;
        }

        kvec.iov_base = (void *)firmware->data;
        kvec.iov_len = firmware->size;
        iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
        ret = generic_load_microcode(cpu, &iter);

        release_firmware(firmware);

        return ret;
}

static struct microcode_ops microcode_intel_ops = {
        .request_microcode_fw = request_microcode_fw,
        .collect_cpu_info     = collect_cpu_info,
        .apply_microcode      = apply_microcode_intel,
};

static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
        u64 llc_size = c->x86_cache_size * 1024ULL;

        do_div(llc_size, c->x86_max_cores);

        return (int)llc_size;
}

struct microcode_ops * __init init_intel_microcode(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
            cpu_has(c, X86_FEATURE_IA64)) {
                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
                return NULL;
        }

        llc_size_per_core = calc_llc_size_per_core(c);

        return &microcode_intel_ops;
}