// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows updating microcode on family 10h (F10h) and
 * later AMD CPUs.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	     2013-2018 Borislav Petkov <bp@alien8.de>
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
 * Based on work by:
 * Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <crypto/sha2.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
#include <asm/tlb.h>

#include "internal.h"

struct ucode_patch {
	struct list_head plist;
	void *data;
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(microcode_cache);

#define UCODE_MAGIC			0x00414d44
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001

#define SECTION_HDR_SIZE	8
#define CONTAINER_HDR_SZ	12

struct equiv_cpu_entry {
	u32 installed_cpu;
	u32 fixed_errata_mask;
	u32 fixed_errata_compare;
	u16 equiv_cpu;
	u16 res;
} __packed;

struct microcode_header_amd {
	u32 data_code;
	u32 patch_id;
	u16 mc_patch_data_id;
	u8  mc_patch_data_len;
	u8  init_flag;
	u32 mc_patch_data_checksum;
	u32 nb_dev_id;
	u32 sb_dev_id;
	u16 processor_rev_id;
	u8  nb_rev_id;
	u8  sb_rev_id;
	u8  bios_api_rev;
	u8  reserved1[3];
	u32 match_reg[8];
} __packed;

struct microcode_amd {
	struct microcode_header_amd hdr;
	unsigned int mpb[];
};

#define PATCH_MAX_SIZE (3 * PAGE_SIZE)

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;

union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

union cpuid_1_eax {
	struct {
		__u32 stepping	  : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model	  : 4,
		      ext_fam	  : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd/builtin before jettisoning its contents. @mc is
 * the microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32 psize;
	u8 *data;
	size_t size;
};

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

/*
 * This is CPUID(1).EAX on the BSP. It is used in two ways:
 *
 * 1. To ignore the equivalence table on Zen1 and newer.
 *
 * 2. To match which patches to load because the patch revision ID
 *    already contains the f/m/s for which the microcode is destined.
 */
static u32 bsp_cpuid_1_eax __ro_after_init;

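/*
 * Gate SHA256 verification of patches. Enabled by default; can be disabled
 * with microcode.amd_sha_check=off on the kernel command line (see
 * load_ucode_amd_bsp()), which taints the kernel.
 */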
static bool sha_check = true;

struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};

#include "amd_shas.c"

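/* bsearch() comparator for looking up a patch ID in the sorted phashes[] array. */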
static int cmp_id(const void *key, const void *elem)
{
	struct patch_digest *pd = (struct patch_digest *)elem;
	u32 patch_id = *(u32 *)key;

	if (patch_id == pd->patch_id)
		return 0;
	else if (patch_id < pd->patch_id)
		return -1;
	else
		return 1;
}

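/*
 * Return true if the currently running microcode revision @cur_rev is one
 * which must still have its replacement patch SHA256-verified. Each case
 * lists the highest revision in that patch-level group which still requires
 * the check.
 */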
static bool need_sha_check(u32 cur_rev)
{
	switch (cur_rev >> 8) {
	case 0x80012: return cur_rev <= 0x800126f; break;
	case 0x80082: return cur_rev <= 0x800820f; break;
	case 0x83010: return cur_rev <= 0x830107c; break;
	case 0x86001: return cur_rev <= 0x860010e; break;
	case 0x86081: return cur_rev <= 0x8608108; break;
	case 0x87010: return cur_rev <= 0x8701034; break;
	case 0x8a000: return cur_rev <= 0x8a0000a; break;
	case 0xa0010: return cur_rev <= 0xa00107a; break;
	case 0xa0011: return cur_rev <= 0xa0011da; break;
	case 0xa0012: return cur_rev <= 0xa001243; break;
	case 0xa0082: return cur_rev <= 0xa00820e; break;
	case 0xa1011: return cur_rev <= 0xa101153; break;
	case 0xa1012: return cur_rev <= 0xa10124e; break;
	case 0xa1081: return cur_rev <= 0xa108109; break;
	case 0xa2010: return cur_rev <= 0xa20102f; break;
	case 0xa2012: return cur_rev <= 0xa201212; break;
	case 0xa4041: return cur_rev <= 0xa404109; break;
	case 0xa5000: return cur_rev <= 0xa500013; break;
	case 0xa6012: return cur_rev <= 0xa60120a; break;
	case 0xa7041: return cur_rev <= 0xa704109; break;
	case 0xa7052: return cur_rev <= 0xa705208; break;
	case 0xa7080: return cur_rev <= 0xa708009; break;
	case 0xa70c0: return cur_rev <= 0xa70c009; break;
	case 0xaa001: return cur_rev <= 0xaa00116; break;
	case 0xaa002: return cur_rev <= 0xaa00218; break;
	case 0xb0021: return cur_rev <= 0xb002146; break;
	case 0xb1010: return cur_rev <= 0xb101046; break;
	case 0xb2040: return cur_rev <= 0xb204031; break;
	case 0xb4040: return cur_rev <= 0xb404031; break;
	case 0xb6000: return cur_rev <= 0xb600031; break;
	case 0xb7000: return cur_rev <= 0xb700031; break;
	default: break;
	}

	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
	return true;
}

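/*
 * Verify the SHA256 digest of the patch @data of @len bytes against the
 * known digest for @patch_id. Families before 17h (Zen), revisions for which
 * need_sha_check() returns false, and an explicitly disabled sha_check all
 * pass without verification.
 */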
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	struct sha256_state s;
	int i;

	if (x86_family(bsp_cpuid_1_eax) < 0x17)
		return true;

	if (!need_sha_check(cur_rev))
		return true;

	if (!sha_check)
		return true;

	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256_init(&s);
	sha256_update(&s, data, len);
	sha256_final(&s, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}

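/* Read the current microcode revision from MSR_AMD64_PATCH_LEVEL. */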
static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}

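/*
 * Reconstruct CPUID(1).EAX from a Zen-era patch revision ID, which encodes
 * the stepping, model and extended family the patch is meant for. The base
 * family field is hardcoded to 0xf since those families are expressed via
 * ext_fam.
 */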
static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
	union zen_patch_rev p;
	union cpuid_1_eax c;

	p.ucode_rev = val;
	c.full = 0;

	c.stepping  = p.stepping;
	c.model     = p.model;
	c.ext_model = p.ext_model;
	c.family    = 0xf;
	c.ext_fam   = p.ext_fam;

	return c;
}

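/*
 * Look up the equivalence ID for CPU signature @sig in table @et. Returns 0
 * if there is no match, or if no table is needed at all on Zen and newer.
 */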
static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return 0;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;
	}
	return 0;
}

/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size.
 */
static bool verify_container(const u8 *buf, size_t buf_size)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		pr_debug("Truncated microcode container header.\n");
		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size.
 */
static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size))
		return false;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return true;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		pr_debug("Wrong microcode container equivalence table type: %u.\n",
			 cont_type);
		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		pr_debug("Truncated equivalence table.\n");
		return false;
	}

	return true;
}

/*
 * Check whether there is a valid, non-truncated microcode patch section at
 * the beginning of @buf of size @buf_size.
 *
 * On success, the patch size read from the section header is returned to the
 * caller in @sh_psize.
 */
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		pr_debug("Truncated patch section.\n");
		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		pr_debug("Invalid type field (0x%x) in container file section header.\n",
			 p_type);
		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		pr_debug("Patch of size %u too short.\n", p_size);
		return false;
	}

	*sh_psize = p_size;

	return true;
}

/*
 * Check whether the remaining file size @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and also whether this size does not
 * exceed the per-family maximum). @sh_psize is the size read from the
 * section header.
 */
static bool __verify_patch_size(u32 sh_psize, size_t buf_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	u32 max_size;

	if (family >= 0x15)
		goto ret;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return false;
	}

	if (sh_psize > max_size)
		return false;

ret:
	/* Working with the whole buffer, so consuming it exactly is ok. */
	return sh_psize <= buf_size;
}

/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length, so it needs to be
	 * subtracted before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		pr_debug("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	if (!__verify_patch_size(sh_psize, buf_size)) {
		pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	/* The high nibble of the processor revision ID encodes family - 0xf. */
	proc_id = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}

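/*
 * Check whether patch @mc is meant for this CPU: on Zen and newer, compare
 * the f/m/s encoded in the patch ID with the BSP's CPUID(1).EAX; on older
 * families, compare equivalence IDs.
 */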
static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
{
	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
	else
		return eq_id == mc->hdr.processor_rev_id;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the number of bytes consumed while scanning, or 0 when @ucode
 * already points at the container holding a matching patch. @desc contains
 * all the data we're going to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed: skip to the next
			 * container, if there is one. A matching patch which
			 * was already found in this container is still used
			 * on exit.
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (mc_patch_matches(mc, eq_id)) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

out:
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

	return orig_size - size;
}

/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;

		/* catch wraparound */
		if (size >= s) {
			ucode += s;
			size -= s;
		} else {
			return;
		}
	}
}

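/*
 * Apply patch @mc of size @psize by writing its address to the
 * MSR_AMD64_PATCH_LOADER MSR, after its SHA256 digest has been verified.
 * Returns true and updates @cur_rev only if the hardware afterwards reports
 * the patch's revision as the current one.
 */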
static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
				  unsigned int psize)
{
	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;

	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
		return false;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);

	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
		unsigned long p_addr_end = p_addr + psize - 1;

		invlpg(p_addr);

		/*
		 * Flush next page too if patch image is crossing a page
		 * boundary.
		 */
		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
			invlpg(p_addr_end);
	}

	/* verify patch application was successful */
	*cur_rev = get_patch_level();
	if (*cur_rev != mc->hdr.patch_id)
		return false;

	return true;
}

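/*
 * Look up the family-specific microcode blob built into the kernel image.
 * Returns false on 32-bit or when no builtin firmware is found.
 */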
static bool get_builtin_microcode(struct cpio_data *cp)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

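/*
 * Locate a microcode container blob: prefer builtin firmware and fall back
 * to the initrd. Returns true and fills in @ret if one was found.
 */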
static bool __init find_blobs_in_containers(struct cpio_data *ret)
{
	struct cpio_data cp;
	bool found;

	if (!get_builtin_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	found = cp.data && cp.size;
	if (found)
		*ret = cp;

	return found;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse the equivalence table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save the equivalence table and microcode patches in
 * kernel heap memory.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	char buf[4];
	u32 rev;

	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}

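/*
 * Check whether two cached patches target equivalent CPUs. On Zen and newer
 * the f/m/s is decoded from the patch IDs (optionally ignoring the stepping);
 * on older families the equivalence IDs are compared.
 */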
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
		union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);

		if (ignore_stepping) {
			p_cid.stepping = 0;
			n_cid.stepping = 0;
		}

		return p_cid.full == n_cid.full;
	} else {
		return p->equiv_cpu == n->equiv_cpu;
	}
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
	struct ucode_patch *p;
	struct ucode_patch n;

	n.equiv_cpu = equiv_cpu;
	n.patch_id  = uci->cpu_sig.rev;

	WARN_ON_ONCE(!n.patch_id);

	list_for_each_entry(p, &microcode_cache, plist)
		if (patch_cpus_equivalent(p, &n, false))
			return p;

	return NULL;
}

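/*
 * Return 1 if @n is newer than @p, 0 if it is not, or -1 on Zen and newer
 * when the two patches are for different steppings and thus not comparable.
 */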
static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union zen_patch_rev zp, zn;

		zp.ucode_rev = p->patch_id;
		zn.ucode_rev = n->patch_id;

		if (zn.stepping != zp.stepping)
			return -1;

		return zn.rev > zp.rev;
	} else {
		return n->patch_id > p->patch_id;
	}
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

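/*
 * Refresh the cached microcode revision for @cpu and look up a matching
 * patch in the cache. Pre-Zen families need an equivalence ID first.
 */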
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id = 0;

	uci->cpu_sig.rev = get_patch_level();

	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(uci, equiv_id);
}

void reload_ucode_amd(unsigned int cpu)
{
	struct microcode_amd *mc;
	struct ucode_patch *p;
	u32 rev;

	p = find_patch(cpu);
	if (!p)
		return;

	mc = p->data;

	rev = get_patch_level();
	if (rev < mc->hdr.patch_id) {
		if (__apply_microcode_amd(mc, &rev, p->size))
			pr_info_once("reload revision: 0x%08x\n", rev);
	}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = get_patch_level();

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	return 0;
}

static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd  = p->data;
	uci->mc = p->data;

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
		       cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

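/*
 * Early AP load: record the AP's CPUID(1).EAX signature and apply the patch
 * cached during BSP load.
 */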
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	unsigned int cpu = smp_processor_id();

	ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax;
	apply_microcode_amd(cpu);
}

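/*
 * Copy the equivalence table at the start of @buf into kernel memory (not
 * needed on Zen and newer). Returns the number of bytes consumed, including
 * the container header, or 0 on error.
 */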
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return;

	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation, after which the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr  = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}

/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw   += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw   += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		cleanup();

	return ret;
}

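/*
 * Load the patches in @data into the cache and report UCODE_NEW when at
 * least one node has a CPU whose currently applied revision is older than a
 * cached patch.
 */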
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;

	for_each_node_with_cpus(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;
	}

	return ret;
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	unsigned int cpuid_1_eax;
	enum ucode_state ret;
	struct cpio_data cp;

	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	cpuid_1_eax = native_cpuid_eax(1);

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);

/*
 * AMD microcode firmware naming convention: up to family 15h, patches are in
 * the legacy file
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	if (force_minrev)
		return UCODE_NFOUND;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size))
		goto fw_release;

	ret = load_microcode_amd(c->x86, fw->data, fw->size);

fw_release:
	release_firmware(fw);

out:
	return ret;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw	= request_microcode_amd,
	.collect_cpu_info	= collect_cpu_info_amd,
	.apply_microcode	= apply_microcode_amd,
	.microcode_fini_cpu	= microcode_fini_cpu_amd,
	.nmi_safe		= true,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}