#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = native_read_msr((msr));		\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	native_write_msr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	native_write_msr((msr),				\
			 (u32)((u64)(val)),		\
			 (u32)((u64)(val) >> 32))

struct cpu_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int rev;
};

struct device;

enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };

struct microcode_ops {
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that
	 * the callbacks below run on a target cpu when they
	 * are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	int (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};

struct ucode_cpu_info {
	struct cpu_signature	cpu_sig;
	int			valid;
	void			*mc;
};
extern struct ucode_cpu_info ucode_cpu_info[];

#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif

#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
#else
static inline void __init load_ucode_bsp(void) {}
static inline void load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_MICROCODE_H */
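
/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * native_* helpers defined above can be used to read the currently loaded
 * microcode revision before the paravirt/alternatives machinery is usable,
 * mirroring what the early Intel loader does. This assumes
 * MSR_IA32_UCODE_REV from <asm/msr-index.h> and sync_core() from
 * <asm/processor.h>: the MSR is first cleared, a serializing instruction
 * is executed, and the revision is then read back from the high 32 bits.
 *
 *	u32 dummy, rev;
 *
 *	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 *	sync_core();
 *	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
 */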