// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *               2006      Shaohua Li <shaohua.li@intel.com>
 *               2013-2016 Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *                    H Peter Anvin <hpa@zytor.com>
 *           (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows updating microcode on x86 processors.
 */

#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#include "internal.h"

#define DRIVER_VERSION "2.2"

static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = false;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
        struct cpu_signature *cpu_sig;
        int err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
        0x01000098,
        0x0100009f,
        0x010000af,
        0, /* T-101 terminator */
};
79
80 struct early_load_data early_data;
81
82 /*
83 * Check the current patch level on this CPU.
84 *
85 * Returns:
86 * - true: if update should stop
87 * - false: otherwise
88 */
amd_check_current_patch_level(void)89 static bool amd_check_current_patch_level(void)
90 {
91 u32 lvl, dummy, i;
92 u32 *levels;
93
94 if (x86_cpuid_vendor() != X86_VENDOR_AMD)
95 return false;
96
97 native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
98
99 levels = final_levels;
100
101 for (i = 0; levels[i]; i++) {
102 if (lvl == levels[i])
103 return true;
104 }
105 return false;
106 }

bool __init microcode_loader_disabled(void)
{
        if (dis_ucode_ldr)
                return true;

        /*
         * Disable when:
         *
         * 1) The CPU does not support CPUID.
         *
         * 2) Bit 31 in CPUID[1]:ECX is set.
         *    The bit is reserved for hypervisor use. This is still not
         *    completely accurate as XEN PV guests don't see that CPUID bit
         *    set, but that's good enough as they don't land on the BSP
         *    path anyway.
         *
         * 3) Certain AMD patch levels are not allowed to be
         *    overwritten.
         */
        if (!have_cpuid_p() ||
            native_cpuid_ecx(1) & BIT(31) ||
            amd_check_current_patch_level())
                dis_ucode_ldr = true;

        return dis_ucode_ldr;
}

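/*
 * Early microcode loading on the boot CPU: parse "dis_ucode_ldr" from the
 * boot command line and, if the loader is enabled and the CPU is supported,
 * dispatch to the vendor specific BSP loader.
 */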
void __init load_ucode_bsp(void)
{
        unsigned int cpuid_1_eax;
        bool intel = true;

        if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
                dis_ucode_ldr = true;

        if (microcode_loader_disabled())
                return;

        cpuid_1_eax = native_cpuid_eax(1);

        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
                if (x86_family(cpuid_1_eax) < 6)
                        return;
                break;

        case X86_VENDOR_AMD:
                if (x86_family(cpuid_1_eax) < 0x10)
                        return;
                intel = false;
                break;

        default:
                return;
        }

        if (intel)
                load_ucode_intel_bsp(&early_data);
        else
                load_ucode_amd_bsp(&early_data, cpuid_1_eax);
}

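/* Apply early microcode on an application processor during bringup. */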
void load_ucode_ap(void)
{
        unsigned int cpuid_1_eax;

        /*
         * Can't use microcode_loader_disabled() here - .init section
         * hell. It doesn't have to either - the BSP variant must've
         * parsed cmdline already anyway.
         */
        if (dis_ucode_ldr)
                return;

        cpuid_1_eax = native_cpuid_eax(1);

        switch (x86_cpuid_vendor()) {
        case X86_VENDOR_INTEL:
                if (x86_family(cpuid_1_eax) >= 6)
                        load_ucode_intel_ap();
                break;
        case X86_VENDOR_AMD:
                if (x86_family(cpuid_1_eax) >= 0x10)
                        load_ucode_amd_ap(cpuid_1_eax);
                break;
        default:
                break;
        }
}

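/*
 * Look up the microcode blob at @path in the initrd cpio archive. Returns an
 * empty cpio_data when there is no initrd support or nothing was found.
 */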
struct cpio_data __init find_microcode_in_initrd(const char *path)
{
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long start = 0;
        size_t size;

#ifdef CONFIG_X86_32
        size = boot_params.hdr.ramdisk_size;
        /* Early load on BSP has a temporary mapping. */
        if (size)
                start = initrd_start_early;

#else /* CONFIG_X86_64 */
        size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
        size |= boot_params.hdr.ramdisk_size;

        if (size) {
                start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
                start |= boot_params.hdr.ramdisk_image;
                start += PAGE_OFFSET;
        }
#endif

        /*
         * Fixup the start address: after reserve_initrd() runs, initrd_start
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start contains
         * the updated address so use that instead.
         */
        if (initrd_start)
                start = initrd_start;

        return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
        return (struct cpio_data){ NULL, 0, "" };
#endif
}

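/* Re-apply the early-loaded microcode on @cpu via the vendor specific path. */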
static void reload_early_microcode(unsigned int cpu)
{
        int vendor, family;

        vendor = x86_cpuid_vendor();
        family = x86_cpuid_family();

        switch (vendor) {
        case X86_VENDOR_INTEL:
                if (family >= 6)
                        reload_ucode_intel();
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
                        reload_ucode_amd(cpu);
                break;
        default:
                break;
        }
}

/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stop_machine() effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
enum sibling_ctrl {
        /* Spinwait with timeout */
        SCTRL_WAIT,
        /* Invoke the apply_microcode() callback */
        SCTRL_APPLY,
        /* Proceed without invoking the apply_microcode() callback */
        SCTRL_DONE,
};

struct microcode_ctrl {
        enum sibling_ctrl ctrl;
        enum ucode_state result;
        unsigned int ctrl_cpu;
        bool nmi_enabled;
};

DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
static atomic_t late_cpus_in, offline_in_nmi;
static unsigned int loops_per_usec;
static cpumask_t cpu_offline_mask;

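/*
 * Decrement @cnt to signal arrival and spin until all CPUs have checked in
 * or a one second timeout expires. On timeout, bump the counter back up so
 * that late arrivals cannot make progress either.
 */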
static noinstr bool wait_for_cpus(atomic_t *cnt)
{
        unsigned int timeout, loops;

        WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0);

        for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
                if (!raw_atomic_read(cnt))
                        return true;

                for (loops = 0; loops < loops_per_usec; loops++)
                        cpu_relax();

                /* If invoked directly, tickle the NMI watchdog */
                if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
                        instrumentation_begin();
                        touch_nmi_watchdog();
                        instrumentation_end();
                }
        }
        /* Prevent the late comers from making progress and let them time out */
        raw_atomic_inc(cnt);
        return false;
}

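/*
 * Spin until the control CPU moves this CPU's state away from SCTRL_WAIT or
 * a one second timeout expires.
 */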
static noinstr bool wait_for_ctrl(void)
{
        unsigned int timeout, loops;

        for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
                if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
                        return true;

                for (loops = 0; loops < loops_per_usec; loops++)
                        cpu_relax();

                /* If invoked directly, tickle the NMI watchdog */
                if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
                        instrumentation_begin();
                        touch_nmi_watchdog();
                        instrumentation_end();
                }
        }
        return false;
}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
        /* Initial rendezvous to ensure that all CPUs have arrived */
        if (!wait_for_cpus(&late_cpus_in)) {
                raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
                return false;
        }

        /*
         * Wait for primary threads to complete. If one of them hangs due
         * to the update, there is no way out. This is non-recoverable
         * because the CPU might hold locks or resources and confuse the
         * scheduler, watchdogs etc. There is no way to safely evacuate the
         * machine.
         */
        if (wait_for_ctrl())
                return true;

        instrumentation_begin();
        panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
        instrumentation_end();
}

/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr void load_secondary(unsigned int cpu)
{
        unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu);
        enum ucode_state ret;

        if (!load_secondary_wait(ctrl_cpu)) {
                instrumentation_begin();
                pr_err_once("load: %d CPUs timed out\n",
                            atomic_read(&late_cpus_in) - 1);
                instrumentation_end();
                return;
        }

        /* Primary thread completed. Allow to invoke instrumentable code */
        instrumentation_begin();
        /*
         * If the primary succeeded then invoke the apply() callback,
         * otherwise copy the state from the primary thread.
         */
        if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
                ret = microcode_ops->apply_microcode(cpu);
        else
                ret = per_cpu(ucode_ctrl.result, ctrl_cpu);

        this_cpu_write(ucode_ctrl.result, ret);
        this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
        instrumentation_end();
}

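/*
 * Primary thread update: rendezvous with all CPUs, apply the microcode and
 * then tell the SMT siblings whether to run the apply() callback themselves
 * or to proceed without it.
 */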
static void __load_primary(unsigned int cpu)
{
        struct cpumask *secondaries = topology_sibling_cpumask(cpu);
        enum sibling_ctrl ctrl;
        enum ucode_state ret;
        unsigned int sibling;

        /* Initial rendezvous to ensure that all CPUs have arrived */
        if (!wait_for_cpus(&late_cpus_in)) {
                this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
                pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1);
                return;
        }

        ret = microcode_ops->apply_microcode(cpu);
        this_cpu_write(ucode_ctrl.result, ret);
        this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);

        /*
         * If the update was successful, let the siblings run the apply()
         * callback. If not, tell them it's done. This also covers the
         * case where the CPU has uniform loading at package or system
         * scope implemented but does not advertise it.
         */
        if (ret == UCODE_UPDATED || ret == UCODE_OK)
                ctrl = SCTRL_APPLY;
        else
                ctrl = SCTRL_DONE;

        for_each_cpu(sibling, secondaries) {
                if (sibling != cpu)
                        per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
        }
}

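/*
 * Send an NMI to each soft-offlined sibling and wait up to half a second for
 * all of them to enter the offline rendezvous handler.
 */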
static bool kick_offline_cpus(unsigned int nr_offl)
{
        unsigned int cpu, timeout;

        for_each_cpu(cpu, &cpu_offline_mask) {
                /* Enable the rendezvous handler and send NMI */
                per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
                apic_send_nmi_to_offline_cpu(cpu);
        }

        /* Wait for them to arrive */
        for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) {
                if (atomic_read(&offline_in_nmi) == nr_offl)
                        return true;
                udelay(1);
        }
        /* Let the others time out */
        return false;
}

static void release_offline_cpus(void)
{
        unsigned int cpu;

        for_each_cpu(cpu, &cpu_offline_mask)
                per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
}

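/*
 * Primary thread entry point. CPU0 additionally kicks the soft-offlined SMT
 * siblings into the NMI rendezvous before the update and releases them
 * afterwards.
 */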
static void load_primary(unsigned int cpu)
{
        unsigned int nr_offl = cpumask_weight(&cpu_offline_mask);
        bool proceed = true;

        /* Kick soft-offlined SMT siblings if required */
        if (!cpu && nr_offl)
                proceed = kick_offline_cpus(nr_offl);

        /* If the soft-offlined CPUs did not respond, abort */
        if (proceed)
                __load_primary(cpu);

        /* Unconditionally release soft-offlined SMT siblings if required */
        if (!cpu && nr_offl)
                release_offline_cpus();
}

/*
 * Minimal stub rendezvous handler for soft-offlined CPUs which participate
 * in the NMI rendezvous to protect against a concurrent NMI on affected
 * CPUs.
 */
void noinstr microcode_offline_nmi_handler(void)
{
        if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
                return;
        raw_cpu_write(ucode_ctrl.nmi_enabled, false);
        raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
        raw_atomic_inc(&offline_in_nmi);
        wait_for_ctrl();
}

static noinstr bool microcode_update_handler(void)
{
        unsigned int cpu = raw_smp_processor_id();

        if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) {
                instrumentation_begin();
                load_primary(cpu);
                instrumentation_end();
        } else {
                load_secondary(cpu);
        }

        instrumentation_begin();
        touch_nmi_watchdog();
        instrumentation_end();

        return true;
}

/*
 * Protection against instrumentation is required for CPUs which are not
 * safe against an NMI which is delivered to the secondary SMT sibling
 * while the primary thread updates the microcode. Instrumentation can end
 * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI
 * which is the opposite of what the NMI rendezvous is trying to achieve.
 *
 * The primary thread is safe versus instrumentation as the actual
 * microcode update handles this correctly. It's only the sibling code
 * path which must be NMI safe until the primary thread completed the
 * update.
 */
bool noinstr microcode_nmi_handler(void)
{
        if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
                return false;

        raw_cpu_write(ucode_ctrl.nmi_enabled, false);
        return microcode_update_handler();
}

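/*
 * stop_machine() callback running on every online CPU: either raise a
 * self-NMI so the update runs in the NMI handler, or invoke the update
 * handler directly.
 */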
static int load_cpus_stopped(void *unused)
{
        if (microcode_ops->use_nmi) {
                /* Enable the NMI handler and raise NMI */
                this_cpu_write(ucode_ctrl.nmi_enabled, true);
                apic->send_IPI(smp_processor_id(), NMI_VECTOR);
        } else {
                /* Just invoke the handler directly */
                microcode_update_handler();
        }
        return 0;
}

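/*
 * Orchestrate the late load under stop_machine(), then analyze the per CPU
 * results, taint the kernel if required and report the outcome.
 */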
static int load_late_stop_cpus(bool is_safe)
{
        unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0;
        unsigned int nr_offl, offline = 0;
        int old_rev = boot_cpu_data.microcode;
        struct cpuinfo_x86 prev_info;

        if (!is_safe) {
                pr_err("Late microcode loading without minimal revision check.\n");
                pr_err("You should switch to early loading, if possible.\n");
        }

        atomic_set(&late_cpus_in, num_online_cpus());
        atomic_set(&offline_in_nmi, 0);
        loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000);

        /*
         * Take a snapshot before the microcode update in order to compare and
         * check whether any bits changed after an update.
         */
        store_cpu_caps(&prev_info);

        if (microcode_ops->use_nmi)
                static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);

        stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask);

        if (microcode_ops->use_nmi)
                static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);

        /* Analyze the results */
        for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
                switch (per_cpu(ucode_ctrl.result, cpu)) {
                case UCODE_UPDATED:     updated++; break;
                case UCODE_TIMEOUT:     timedout++; break;
                case UCODE_OK:          siblings++; break;
                case UCODE_OFFLINE:     offline++; break;
                default:                failed++; break;
                }
        }

        if (microcode_ops->finalize_late_load)
                microcode_ops->finalize_late_load(!updated);

        if (!updated) {
                /* Nothing changed. */
                if (!failed && !timedout)
                        return 0;

                nr_offl = cpumask_weight(&cpu_offline_mask);
                if (offline < nr_offl) {
                        pr_warn("%u offline siblings did not respond.\n",
                                nr_offl - atomic_read(&offline_in_nmi));
                        return -EIO;
                }
                pr_err("update failed: %u CPUs failed %u CPUs timed out\n",
                       failed, timedout);
                return -EIO;
        }

        if (!is_safe || failed || timedout)
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);

        pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings);
        if (failed || timedout) {
                pr_err("load incomplete. %u CPUs timed out or failed\n",
                       num_online_cpus() - (updated + siblings));
        }
        pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode);
        microcode_check(&prev_info);

        return updated + siblings == num_online_cpus() ? 0 : -EIO;
}

/*
 * This function does two things:
 *
 * 1) Ensure that all required CPUs which are present and have been booted
 *    once are online.
 *
 *    To pass this check, all primary threads must be online.
 *
 *    If the microcode load is not safe against NMI then all SMT threads
 *    must be online as well because they still react to NMIs when they are
 *    soft-offlined and parked in one of the play_dead() variants. So if a
 *    NMI hits while the primary thread updates the microcode the resulting
 *    behaviour is undefined. The default play_dead() implementation on
 *    modern CPUs uses MWAIT, which is also not guaranteed to be safe
 *    against a microcode update which affects MWAIT.
 *
 *    As soft-offlined CPUs still react on NMIs, the SMT sibling
 *    restriction can be lifted when the vendor driver signals to use NMI
 *    for rendezvous and the APIC provides a mechanism to send an NMI to a
 *    soft-offlined CPU. The soft-offlined CPUs are then able to
 *    participate in the rendezvous in a trivial stub handler.
 *
 * 2) Initialize the per CPU control structure and create a cpumask which
 *    contains the "offline" secondary threads, so they can be handled
 *    correctly by a control CPU.
 */
static bool setup_cpus(void)
{
        struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
        bool allow_smt_offline;
        unsigned int cpu;

        allow_smt_offline = microcode_ops->nmi_safe ||
                (microcode_ops->use_nmi && apic->nmi_to_offline_cpu);

        cpumask_clear(&cpu_offline_mask);

        for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
                /*
                 * Offline CPUs sit in one of the play_dead() functions
                 * with interrupts disabled, but they still react on NMIs
                 * and execute arbitrary code. Also MWAIT being updated
                 * while the offline CPU sits there is not necessarily safe
                 * on all CPU variants.
                 *
                 * Mark them in the offline_cpus mask which will be handled
                 * by CPU0 later in the update process.
                 *
                 * Ensure that the primary thread is online so that it is
                 * guaranteed that all cores are updated.
                 */
                if (!cpu_online(cpu)) {
                        if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
                                pr_err("CPU %u not online, loading aborted\n", cpu);
                                return false;
                        }
                        cpumask_set_cpu(cpu, &cpu_offline_mask);
                        per_cpu(ucode_ctrl, cpu) = ctrl;
                        continue;
                }

                /*
                 * Initialize the per CPU state. This is core scope for now,
                 * but prepared to take package or system scope into account.
                 */
                ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
                per_cpu(ucode_ctrl, cpu) = ctrl;
        }
        return true;
}

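/*
 * Called with cpus_read_lock() held: set up the per CPU state, request the
 * microcode image from firmware and trigger the update if a new, applicable
 * patch was found.
 */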
static int load_late_locked(void)
{
        if (!setup_cpus())
                return -EBUSY;

        switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) {
        case UCODE_NEW:
                return load_late_stop_cpus(false);
        case UCODE_NEW_SAFE:
                return load_late_stop_cpus(true);
        case UCODE_NFOUND:
                return -ENOENT;
        case UCODE_OK:
                return 0;
        default:
                return -EBADFD;
        }
}

static ssize_t reload_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
{
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul(buf, 0, &val);
        if (ret || val != 1)
                return -EINVAL;

        cpus_read_lock();
        ret = load_late_locked();
        cpus_read_unlock();

        return ret ? : size;
}

static DEVICE_ATTR_WO(reload);
#endif


static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t processor_flags_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

        return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(processor_flags);

static struct attribute *mc_default_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_processor_flags.attr,
        NULL
};

static const struct attribute_group mc_attr_group = {
        .attrs = mc_default_attrs,
        .name  = "microcode",
};

static void microcode_fini_cpu(int cpu)
{
        if (microcode_ops->microcode_fini_cpu)
                microcode_ops->microcode_fini_cpu(cpu);
}

/**
 * microcode_bsp_resume - Update boot CPU microcode during resume.
 */
void microcode_bsp_resume(void)
{
        int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        if (uci->mc)
                microcode_ops->apply_microcode(cpu);
        else
                reload_early_microcode(cpu);
}

static struct syscore_ops mc_syscore_ops = {
        .resume = microcode_bsp_resume,
};

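/*
 * CPU hotplug online callback: collect the current microcode signature and
 * revision for the CPU and create its sysfs microcode group.
 */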
static int mc_cpu_online(unsigned int cpu)
{
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct device *dev = get_cpu_device(cpu);

        memset(uci, 0, sizeof(*uci));

        microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig);
        cpu_data(cpu).microcode = uci->cpu_sig.rev;
        if (!cpu)
                boot_cpu_data.microcode = uci->cpu_sig.rev;

        if (sysfs_create_group(&dev->kobj, &mc_attr_group))
                pr_err("Failed to create group for CPU%d\n", cpu);
        return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        microcode_fini_cpu(cpu);
        sysfs_remove_group(&dev->kobj, &mc_attr_group);
        return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
        &dev_attr_reload.attr,
#endif
        NULL
};

static const struct attribute_group cpu_root_microcode_group = {
        .name  = "microcode",
        .attrs = cpu_root_microcode_attrs,
};

static int __init microcode_init(void)
{
        struct device *dev_root;
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int error;

        if (microcode_loader_disabled())
                return -EINVAL;

        if (c->x86_vendor == X86_VENDOR_INTEL)
                microcode_ops = init_intel_microcode();
        else if (c->x86_vendor == X86_VENDOR_AMD)
                microcode_ops = init_amd_microcode();
        else
                pr_err("no support for this CPU vendor\n");

        if (!microcode_ops)
                return -ENODEV;

        pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));

        if (early_data.new_rev)
                pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);

        microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
        if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);

        dev_root = bus_get_dev_root(&cpu_subsys);
        if (dev_root) {
                error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group);
                put_device(dev_root);
                if (error) {
                        pr_err("Error creating microcode group!\n");
                        goto out_pdev;
                }
        }

        register_syscore_ops(&mc_syscore_ops);
        cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
                          mc_cpu_online, mc_cpu_down_prep);

        pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

        return 0;

 out_pdev:
        platform_device_unregister(microcode_pdev);
        return error;

}
late_initcall(microcode_init);