/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_MICROCODE_INTERNAL_H
#define _X86_MICROCODE_INTERNAL_H

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/microcode.h>

struct ucode_patch {
	struct list_head plist;
	void *data;		/* Intel uses only this one */
	unsigned int size;
	u32 patch_id;
	u16 equiv_cpu;
};

extern struct list_head microcode_cache;

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);

	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on a target cpu when they are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state (*apply_microcode)(int cpu);
	int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
};

extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
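/*
 * CPUID leaf 0 returns the 12-character vendor string in the register
 * order EBX, EDX, ECX, i.e. "Genu"-"ineI"-"ntel" and "Auth"-"enti"-"cAMD",
 * which is why CPUID_IS() below compares EBX against its first argument,
 * EDX against its second and ECX against its third. XORing each register
 * with the expected value and ORing the three results yields zero only
 * when all of them match.
 */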
#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is
 * not set up yet, so x86_cpuid_vendor() is used to get the vendor ID.
 *
 * On a 32-bit AP, accessing boot_cpu_data would require a linear address.
 * To keep the code simple, x86_cpuid_vendor() is used for APs as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}
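/*
 * CPUID leaf 1 encodes the family in EAX: bits 11:8 hold the base family
 * and, when the base family is 0xf, x86_family() adds the extended family
 * from bits 27:20. E.g. a Zen CPU reports base family 0xf and extended
 * family 0x8, giving family 0x17.
 */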
static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

extern bool initrd_gone;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(unsigned int family);
void load_ucode_amd_ap(unsigned int family);
void load_ucode_amd_early(unsigned int cpuid_1_eax);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline void load_ucode_amd_early(unsigned int cpuid_1_eax) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(void);
void load_ucode_intel_ap(void);
int save_microcode_in_initrd_intel(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(void) { }
static inline void load_ucode_intel_ap(void) { }
static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */

#endif /* _X86_MICROCODE_INTERNAL_H */