xref: /openbmc/linux/arch/x86/include/asm/microcode.h (revision a8ebf6d1d6971b90a20f5bd0465e6d520377e33b)
11965aae3SH. Peter Anvin #ifndef _ASM_X86_MICROCODE_H
21965aae3SH. Peter Anvin #define _ASM_X86_MICROCODE_H
3bb898558SAl Viro 
4bb898558SAl Viro /* Identifies a CPU's microcode state for matching against update images. */
4bb898558SAl Viro struct cpu_signature {
5bb898558SAl Viro 	unsigned int sig;	/* CPUID(1).EAX processor signature */
6bb898558SAl Viro 	unsigned int pf;	/* processor flags (Intel platform ID mask; presumably 0 on AMD — confirm) */
7bb898558SAl Viro 	unsigned int rev;	/* currently loaded microcode revision */
8bb898558SAl Viro };
9bb898558SAl Viro 
10bb898558SAl Viro struct device;
11bb898558SAl Viro 
12871b72ddSDmitry Adamushko /* Result of a microcode load attempt: hard error, success, or no matching image found. */
12871b72ddSDmitry Adamushko enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
13871b72ddSDmitry Adamushko 
14bb898558SAl Viro /*
14bb898558SAl Viro  * Vendor-specific microcode loader operations. One instance is provided
14bb898558SAl Viro  * per vendor by init_intel_microcode()/init_amd_microcode() and driven
14bb898558SAl Viro  * by the generic microcode_core code.
14bb898558SAl Viro  */
14bb898558SAl Viro struct microcode_ops {
15871b72ddSDmitry Adamushko 	/* Load a microcode image supplied from user space (e.g. the /dev/cpu/microcode interface). */
15871b72ddSDmitry Adamushko 	enum ucode_state (*request_microcode_user) (int cpu,
16871b72ddSDmitry Adamushko 				const void __user *buf, size_t size);
17bb898558SAl Viro 
1848e30685SBorislav Petkov 	/*
1848e30685SBorislav Petkov 	 * Load a microcode image via the firmware/request_firmware path.
1848e30685SBorislav Petkov 	 * refresh_fw: presumably forces a re-fetch from the firmware loader
1848e30685SBorislav Petkov 	 * instead of using a cached copy — confirm against the vendor drivers.
1848e30685SBorislav Petkov 	 */
1848e30685SBorislav Petkov 	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
1948e30685SBorislav Petkov 						  bool refresh_fw);
20bb898558SAl Viro 
21bb898558SAl Viro 	/* Release per-CPU microcode state (e.g. on CPU offline or driver teardown). */
21bb898558SAl Viro 	void (*microcode_fini_cpu) (int cpu);
22871b72ddSDmitry Adamushko 
23871b72ddSDmitry Adamushko 	/*
24871b72ddSDmitry Adamushko 	 * The generic 'microcode_core' part guarantees that
25871b72ddSDmitry Adamushko 	 * the callbacks below run on a target cpu when they
26871b72ddSDmitry Adamushko 	 * are being called.
27871b72ddSDmitry Adamushko 	 * See also the "Synchronization" section in microcode_core.c.
28871b72ddSDmitry Adamushko 	 */
29871b72ddSDmitry Adamushko 	int (*apply_microcode) (int cpu);
30871b72ddSDmitry Adamushko 	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
31bb898558SAl Viro };
32bb898558SAl Viro 
33bb898558SAl Viro /* Per-CPU microcode bookkeeping, one entry per possible CPU (see array below). */
33bb898558SAl Viro struct ucode_cpu_info {
34bb898558SAl Viro 	struct cpu_signature	cpu_sig;	/* signature collected via collect_cpu_info() */
35bb898558SAl Viro 	int			valid;		/* non-zero once cpu_sig has been populated */
36bb898558SAl Viro 	void			*mc;		/* vendor-private pointer to the cached microcode image */
37bb898558SAl Viro };
38bb898558SAl Viro /* Defined by the generic loader core; indexed by CPU number. */
38bb898558SAl Viro extern struct ucode_cpu_info ucode_cpu_info[];
39bb898558SAl Viro 
40bb898558SAl Viro /*
40bb898558SAl Viro  * Intel loader hook: returns the Intel microcode_ops, or a NULL stub when
40bb898558SAl Viro  * CONFIG_MICROCODE_INTEL is not built in so callers need no #ifdefs.
40bb898558SAl Viro  */
40bb898558SAl Viro #ifdef CONFIG_MICROCODE_INTEL
41bb898558SAl Viro extern struct microcode_ops * __init init_intel_microcode(void);
42bb898558SAl Viro #else
43bb898558SAl Viro static inline struct microcode_ops * __init init_intel_microcode(void)
44bb898558SAl Viro {
45bb898558SAl Viro 	return NULL;
46bb898558SAl Viro }
47bb898558SAl Viro #endif /* CONFIG_MICROCODE_INTEL */
48bb898558SAl Viro 
49bb898558SAl Viro /*
49bb898558SAl Viro  * AMD loader hooks: init returns the AMD microcode_ops, exit tears the
49bb898558SAl Viro  * driver down. Both collapse to no-op stubs when CONFIG_MICROCODE_AMD
49bb898558SAl Viro  * is not built in so callers need no #ifdefs.
49bb898558SAl Viro  */
49bb898558SAl Viro #ifdef CONFIG_MICROCODE_AMD
50bb898558SAl Viro extern struct microcode_ops * __init init_amd_microcode(void);
51f72c1a57SBorislav Petkov extern void __exit exit_amd_microcode(void);
52bb898558SAl Viro #else
53bb898558SAl Viro static inline struct microcode_ops * __init init_amd_microcode(void)
54bb898558SAl Viro {
55bb898558SAl Viro 	return NULL;
56bb898558SAl Viro }
57f72c1a57SBorislav Petkov static inline void __exit exit_amd_microcode(void) {}
58bb898558SAl Viro #endif
59bb898558SAl Viro 
60*a8ebf6d1SFenghua Yu /*
60*a8ebf6d1SFenghua Yu  * Early microcode loading (before the regular driver is available):
60*a8ebf6d1SFenghua Yu  * load_ucode_bsp() runs on the boot CPU, load_ucode_ap() on each
60*a8ebf6d1SFenghua Yu  * application processor, and save_microcode_in_initrd() preserves the
60*a8ebf6d1SFenghua Yu  * initrd-supplied image (returns 0 on success; exact error semantics
60*a8ebf6d1SFenghua Yu  * are defined by the vendor implementations — not visible here).
60*a8ebf6d1SFenghua Yu  * All become no-op stubs when CONFIG_MICROCODE_EARLY is disabled.
60*a8ebf6d1SFenghua Yu  */
60*a8ebf6d1SFenghua Yu #ifdef CONFIG_MICROCODE_EARLY
61*a8ebf6d1SFenghua Yu #define MAX_UCODE_COUNT 128
62*a8ebf6d1SFenghua Yu extern void __init load_ucode_bsp(void);
63*a8ebf6d1SFenghua Yu extern __init void load_ucode_ap(void);
64*a8ebf6d1SFenghua Yu extern int __init save_microcode_in_initrd(void);
65*a8ebf6d1SFenghua Yu #else
66*a8ebf6d1SFenghua Yu static inline void __init load_ucode_bsp(void) {}
67*a8ebf6d1SFenghua Yu static inline __init void load_ucode_ap(void) {}
68*a8ebf6d1SFenghua Yu static inline int __init save_microcode_in_initrd(void)
69*a8ebf6d1SFenghua Yu {
70*a8ebf6d1SFenghua Yu 	return 0;
71*a8ebf6d1SFenghua Yu }
72*a8ebf6d1SFenghua Yu #endif
73*a8ebf6d1SFenghua Yu 
741965aae3SH. Peter Anvin #endif /* _ASM_X86_MICROCODE_H */
75