xref: /openbmc/linux/arch/x86/kernel/cpu/microcode/amd.c (revision bc5aa3a0)
/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows upgrading microcode on F10h AMD
 *  CPUs and later.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 *
 *  Licensed under the terms of the GNU General Public
 *  License version 2. See file COPYING for details.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_entry *equiv_cpu_table;

struct ucode_patch {
	struct list_head plist;
	void *data;
	u32 patch_id;
	u16 equiv_cpu;
};

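/*
 * A small cache of microcode patches for the current family, keyed by
 * equivalence ID. It is filled by verify_and_add_patch() and consulted
 * later, e.g. on CPU hotplug and on resume.
 */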
static LIST_HEAD(pcache);

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

static struct cpio_data ucode_cpio;

static struct cpio_data __init find_ucode_in_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	char *path;
	void *start;
	size_t size;

	/*
	 * Microcode patch container file is prepended to the initrd in cpio
	 * format. See Documentation/x86/early-microcode.txt
	 */
	static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p       = (struct boot_params *)__pa_nodebug(&boot_params);
	path    = (char *)__pa_nodebug(ucode_path);
	start   = (void *)p->hdr.ramdisk_image;
	size    = p->hdr.ramdisk_size;
#else
	path    = ucode_path;
	start   = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size    = boot_params.hdr.ramdisk_size;
#endif /* !CONFIG_X86_32 */

	return find_cpio_data(path, start, size, NULL);
#else
	return (struct cpio_data){ NULL, 0, "" };
#endif
}

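/*
 * A container file begins with a 12-byte header: UCODE_MAGIC, the section
 * type UCODE_EQUIV_CPU_TABLE_TYPE and the size of the equivalence table.
 * The table is followed by one or more patch sections, each consisting of
 * an 8-byte section header (UCODE_UCODE_TYPE, patch size) and the patch
 * data itself:
 *
 *   [container hdr][equiv cpu table][section hdr][patch][section hdr][patch]...
 *
 * compute_container_size() walks this layout to determine where the
 * current container ends and a possibly appended one begins.
 */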
static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/*
		 * Sanity-check patch size.
		 */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size	   += patch_size + SECTION_HDR_SIZE;
		data	   += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in the initrd, traverse the equivalent CPU table,
 * look for a matching microcode patch, and apply it, all in place in initrd
 * memory. When vmalloc() is available for use later -- on 64-bit during
 * first AP load, and on 32-bit during save_microcode_in_initrd_amd() -- we
 * can call load_microcode_amd() to save the equivalent CPU table and
 * microcode patches in kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8  *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

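	/*
	 * On 32-bit this runs before paging is enabled, so the globals below
	 * must be accessed through their physical addresses.
	 */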
#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont	= (u8 **)__pa_nodebug(&container);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont	= &container;
	patch	= &amd_ucode_patch;
#endif

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset    = data - (u8 *)ucode;
		ucode     = data;
	}

	if (!eq_id) {
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	if (check_current_patch_level(&rev, true))
		return;

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset  = header[1] + SECTION_HDR_SIZE;
		data   += offset;
		left   -= offset;
	}
}

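/*
 * Check for microcode built into the kernel image, i.e. firmware linked in
 * at build time (e.g. via CONFIG_EXTRA_FIRMWARE). Only wired up on 64-bit.
 */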
static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
					      unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}

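/*
 * Early-load entry point for the boot CPU: find a microcode blob, either
 * built into the kernel or prepended to the initrd, remember its location
 * for the APs and apply a matching patch in place.
 */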
void __init load_ucode_amd_bsp(unsigned int family)
{
	struct cpio_data cp;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	data =  (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
#endif

	if (!load_builtin_amd_microcode(&cp, family))
		cp = find_ucode_in_initrd();

	if (!(cp.data && cp.size))
		return;

	*data = cp.data;
	*size = cp.size;

	apply_ucode_in_initrd(cp.data, cp.size, true);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since an AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So
 * during cold boot, an AP calls apply_ucode_in_initrd() just like the BSP.
 * During save_microcode_in_initrd_amd(), the BSP's patch is copied to
 * amd_ucode_patch, which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}

static void __init collect_cpu_sig_on_bsp(void *arg)
{
	unsigned int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->cpu_sig.sig = cpuid_eax(0x00000001);
}

static void __init get_bsp_sig(void)
{
	unsigned int bsp = boot_cpu_data.cpu_index;
	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;

	if (!uci->cpu_sig.sig)
		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
}
#else
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u8 *cont = container;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	if (!container)
		return;

	/*
	 * 64-bit runs with paging enabled, thus early==false.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	eax = cpuid_eax(0x00000001);
	eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
#endif

int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont	= (unsigned long)container;
	cont_va = __va(container);
#else
	/*
	 * We need the physical address of the container on both 32-bit and
	 * 64-bit, since boot_params.hdr.ramdisk_image is a physical address.
	 */
	cont    = __pa(container);
	cont_va = container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated and
	 * therefore we need to recompute the container's position in virtual
	 * memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
			     (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	container += PAGE_OFFSET - __PAGE_OFFSET_BASE;

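	/*
	 * Compute the family from the CPUID fn 0x1 signature: base family in
	 * bits 11:8 plus extended family in bits 27:20. E.g. a family 0x15
	 * CPU with eax = 0x00600f12 yields 0xf + 0x6 = 0x15.
	 */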
	eax   = cpuid_eax(0x00000001);
	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * The initrd will be freed any moment now, so stash patches for the
	 * current family and switch to the patch cache for CPU hotplug etc.
	 * later.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev;

	/*
	 * early==false because this is a syscore ->resume path and by
	 * that time paging is long enabled.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	mc = (struct microcode_amd *)amd_ucode_patch;

	if (mc && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}

static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
}

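/*
 * Map an equivalence ID back to the native CPUID fn 0x1 signature
 * (installed_cpu) recorded in the equivalence table.
 */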
static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	int i = 0;

	BUG_ON(!equiv_cpu_table);

	while (equiv_cpu_table[i].equiv_cpu != 0) {
		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
			return equiv_cpu_table[i].installed_cpu;
		i++;
	}
	return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &pcache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &pcache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 in the case of
 * error.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}

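/*
 * Hand the patch body to the CPU by writing its address to
 * MSR_AMD64_PATCH_LOADER, then confirm the update took effect by reading
 * the new level back from MSR_AMD64_PATCH_LEVEL.
 */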
int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id)
		return -1;

	return 0;
}

int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/"
		       "invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation, and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size  = *(u32 *)(fw + 4);
	crnt_size   = patch_size + SECTION_HDR_SIZE;
	mc_hdr	    = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	    = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

/*
 * AMD microcode firmware naming convention: up to family 15h, patches for
 * all families are in the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
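/*
 * The files are fetched through the regular firmware loader, so they are
 * normally expected under the firmware search path, e.g. /lib/firmware.
 */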
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}