/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__

#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt

#include <linux/device.h>
#include <asm/mce.h>

enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};

extern struct blocking_notifier_head x86_mce_decoder_chain;

#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */

struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};

void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

int mce_severity(struct mce *a, struct pt_regs *regs, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

extern mce_banks_t mce_banks_ce_disabled;

#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
#endif

void mce_timer_kick(unsigned long interval);

#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif

/*
 * We consider records to be equivalent if bank+status+addr+misc all match.
 * This is only used when the system is going down because of a fatal error
 * to avoid cluttering the console log with essentially repeated information.
 * In normal processing all errors seen are logged.
 *
 * Note that despite its name, mce_cmp() returns true when the two records
 * *differ*.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
	return m1->bank != m2->bank ||
		m1->status != m2->status ||
		m1->addr != m2->addr ||
		m1->misc != m2->misc;
}

extern struct device_attribute dev_attr_trigger;

#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif

struct mca_config {
	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      /* Proper #MC exception handler is set */
	      initialized		: 1,
	      __reserved		: 58;

	bool dont_log_ce;
	bool cmci_disabled;
	bool ignore_ce;
	bool print_all;

	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;
	s8 bootlog;
};

extern struct mca_config mca_cfg;
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca			: 1,

	/* Zen IFU quirk */
	zen_ifu_quirk		: 1,

	/* AMD-style error thresholding banks present. */
	amd_threshold		: 1,

	/* Pentium, family 5-style MCA */
	p5			: 1,

	/* Centaur Winchip C6-style MCA */
	winchip			: 1,

	/* SandyBridge IFU quirk */
	snb_ifu_quirk		: 1,

	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
	skx_repmov_quirk	: 1,

	__reserved_0		: 55;
};

extern struct mce_vendor_flags mce_flags;

struct mce_bank {
	/* subevents to enable */
	u64 ctl;

	/* initialise bank? */
	__u64 init		: 1,

	/*
	 * (AMD) MCA_CONFIG[McaLsbInStatusSupported]: When set, this bit indicates
	 * the LSB field is found in MCA_STATUS and not in MCA_ADDR.
	 */
	lsb_in_status		: 1,

	__reserved_1		: 62;
};

DECLARE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);

enum mca_msr {
	MCA_CTL,
	MCA_STATUS,
	MCA_ADDR,
	MCA_MISC,
};

/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);

/*
 * If MCA_CONFIG[McaLsbInStatusSupported] is set, the LSB of the valid error
 * address is read from MCA_STATUS[29:24] and ErrAddr occupies bits [56:0] of
 * MCA_ADDR; otherwise the LSB is read from MCA_ADDR[61:56] and ErrAddr
 * occupies bits [55:0] of MCA_ADDR.
 */
static __always_inline void smca_extract_err_addr(struct mce *m)
{
	u8 lsb;

	if (!mce_flags.smca)
		return;

	if (this_cpu_ptr(mce_banks_array)[m->bank].lsb_in_status) {
		lsb = (m->status >> 24) & 0x3f;

		m->addr &= GENMASK_ULL(56, lsb);

		return;
	}

	lsb = (m->addr >> 56) & 0x3f;

	m->addr &= GENMASK_ULL(55, lsb);
}

#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
static inline void smca_extract_err_addr(struct mce *m) { }
#endif
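
/*
 * Worked example for smca_extract_err_addr() (illustrative only; the value
 * 12 is an assumed example, not something read from hardware): with
 * lsb_in_status set and MCA_STATUS[29:24] reading 12, address bits below
 * bit 12 are not valid, so the mask keeps only bits [56:12]:
 *
 *	m->addr &= GENMASK_ULL(56, 12);
 */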
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
noinstr void pentium_machine_check(struct pt_regs *regs);
noinstr void winchip_machine_check(struct pt_regs *regs);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static __always_inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static __always_inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static __always_inline void enable_p5_mce(void) {}
static __always_inline void pentium_machine_check(struct pt_regs *regs) {}
static __always_inline void winchip_machine_check(struct pt_regs *regs) {}
#endif

noinstr u64 mce_rdmsrl(u32 msr);

static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
{
	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
		switch (reg) {
		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
		}
	}

	switch (reg) {
	case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
	case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
	case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
	}

	return 0;
}
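
/*
 * Usage sketch (illustrative only): mca_msr_reg() resolves the SMCA vs.
 * legacy MSR layout, so a caller can read a bank register without caring
 * which layout is in use, e.g.:
 *
 *	u64 status = mce_rdmsrl(mca_msr_reg(bank, MCA_STATUS));
 */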
extern void (*mc_poll_banks)(void);

#endif /* __X86_MCE_INTERNAL_H__ */