/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <asm/microcode_amd.h>

struct ucode_patch {
	struct list_head plist;
	void *data;		/* Intel uses only this one */
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

extern struct list_head microcode_cache;

struct cpu_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int rev;
};

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw) (int cpu, struct device *);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on the target CPU when they are called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};

struct ucode_cpu_info {
	struct cpu_signature	cpu_sig;
	void			*mc;
};
extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!((ebx ^ (a)) | (edx ^ (b)) | (ecx ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to get the vendor id for the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data requires a linear address.
 * To keep the code simple, x86_cpuid_vendor() is used to get the vendor id
 * for the AP as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(unsigned int cpu);
extern bool initrd_gone;
void microcode_bsp_resume(void);
#else
static inline void __init load_ucode_bsp(void)			{ }
static inline void load_ucode_ap(void)				{ }
static inline void reload_early_microcode(unsigned int cpu)	{ }
static inline void microcode_bsp_resume(void)			{ }
#endif

#endif /* _ASM_X86_MICROCODE_H */
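
/*
 * Illustrative sketch, not part of the header proper: a stand-alone
 * user-space program (assuming GCC/Clang's <cpuid.h>) that performs the
 * same vendor check as x86_cpuid_vendor() above.  CPUID leaf 0 returns
 * the vendor string split across EBX, EDX and ECX ("Genu" "ineI" "ntel"
 * or "Auth" "enti" "cAMD"), which is why CPUID_IS() XORs EBX against the
 * first QCHAR constant, EDX against the second and ECX against the third.
 * Kept inside a comment so the header itself is unaffected.
 *
 *	#include <stdio.h>
 *	#include <cpuid.h>
 *
 *	#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
 *
 *	int main(void)
 *	{
 *		unsigned int eax, ebx, ecx, edx;
 *
 *		// Leaf 0: vendor string in EBX/EDX/ECX, little-endian packed
 *		if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
 *			return 1;
 *
 *		if (ebx == QCHAR('G', 'e', 'n', 'u') &&
 *		    edx == QCHAR('i', 'n', 'e', 'I') &&
 *		    ecx == QCHAR('n', 't', 'e', 'l'))
 *			printf("Intel\n");
 *		else if (ebx == QCHAR('A', 'u', 't', 'h') &&
 *			 edx == QCHAR('e', 'n', 't', 'i') &&
 *			 ecx == QCHAR('c', 'A', 'M', 'D'))
 *			printf("AMD\n");
 *		else
 *			printf("unknown vendor\n");
 *
 *		return 0;
 *	}
 */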