xref: /openbmc/linux/arch/x86/include/asm/microcode.h (revision 5f9c01aa7c49a2d74474d6d879a797b8badf29e6)
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#include <asm/cpu.h>
#include <linux/earlycpio.h>
#include <linux/initrd.h>

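/*
 * Raw MSR accessors built directly on native_read_msr()/native_write_msr().
 * They avoid the paravirt-patched rdmsr/wrmsr paths and are therefore safe
 * to use from the early microcode loader.
 */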
#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = native_read_msr((msr));		\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	native_write_msr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	native_write_msr((msr),				\
			 (u32)((u64)(val)),		\
			 (u32)((u64)(val) >> 32))

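/*
 * CPU identification as used for microcode matching: @sig is the CPUID(1)
 * signature, @pf the processor flags/platform ID (used by Intel to match a
 * patch to a platform) and @rev the currently loaded microcode revision.
 */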
struct cpu_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int rev;
};

struct device;

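/*
 * Result of a microcode request: UCODE_OK - new microcode was found and
 * stored, UCODE_NFOUND - no matching microcode image, UCODE_ERROR - the
 * image was invalid or loading it failed.
 */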
enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };

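/*
 * Vendor-specific loader callbacks, returned by init_intel_microcode() or
 * init_amd_microcode(): the request_* hooks fetch a microcode image (from a
 * user buffer or via the firmware loader) and apply_microcode() writes it
 * to the CPU.
 */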
struct microcode_ops {
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on the target CPU when they are called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	int (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};

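/*
 * Per-CPU microcode bookkeeping: the collected signature, a validity flag
 * and a pointer to the vendor-specific microcode data for that CPU.
 * ucode_cpu_info[] is indexed by CPU number.
 */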
struct ucode_cpu_info {
	struct cpu_signature	cpu_sig;
	int			valid;
	void			*mc;
};
extern struct ucode_cpu_info ucode_cpu_info[];

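/*
 * microcode_init() sets up the microcode loader proper; with
 * CONFIG_MICROCODE disabled it degenerates to a stub returning 0, as do
 * the vendor init helpers below.
 */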
#ifdef CONFIG_MICROCODE
int __init microcode_init(void);
#else
static inline int __init microcode_init(void)	{ return 0; }
#endif

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif

#define MAX_UCODE_COUNT 128

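/*
 * QCHAR() packs four ASCII characters into a little-endian u32 - the layout
 * in which CPUID leaf 0 returns the vendor string in EBX, EDX and ECX.
 * CPUID_IS() compares all three registers against such constants in one go.
 */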
#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
		(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))

/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to get the vendor id for the BSP.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would require a linear
 * address. To keep the code simple, x86_cpuid_vendor() is used for APs too.
 *
 * x86_cpuid_vendor() gets the vendor information directly from CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

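/*
 * Like x86_cpuid_vendor(), read CPUID (leaf 1) directly rather than relying
 * on boot_cpu_data, and return the CPU family.
 */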
static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

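/*
 * Early loading hooks: load_ucode_bsp() runs early during boot on the boot
 * CPU, load_ucode_ap() when an AP is brought up, save_microcode_in_initrd()
 * preserves the patch before the initrd is freed, and
 * reload_early_microcode() re-applies it, e.g. on resume.
 */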
#ifdef CONFIG_MICROCODE
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
#else
static inline void __init load_ucode_bsp(void)			{ }
static inline void load_ucode_ap(void)				{ }
static inline int __init save_microcode_in_initrd(void)	{ return 0; }
static inline void reload_early_microcode(void)		{ }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
#endif

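/*
 * Helpers for locating the initrd, which the early loader scans for a
 * microcode patch.
 */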
static inline unsigned long get_initrd_start(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	return initrd_start;
#else
	return 0;
#endif
}

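/*
 * On 32-bit the early loader runs before paging is enabled, so initrd_start
 * must be read through its physical address and the virtual address stored
 * in it converted back to a physical one. On 64-bit the value from
 * get_initrd_start() can be used directly.
 */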
static inline unsigned long get_initrd_start_addr(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
#ifdef CONFIG_X86_32
	unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);

	return (unsigned long)__pa_nodebug(*initrd_start_p);
#else
	return get_initrd_start();
#endif
#else /* CONFIG_BLK_DEV_INITRD */
	return 0;
#endif
}

#endif /* _ASM_X86_MICROCODE_H */