/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__

#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt

#include <linux/device.h>
#include <asm/mce.h>

enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};

extern struct blocking_notifier_head x86_mce_decoder_chain;

#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */

struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;
};

void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

extern mce_banks_t mce_banks_ce_disabled;

#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
#endif

void mce_timer_kick(unsigned long interval);

#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif

/*
 * We consider records to be equivalent if bank+status+addr+misc all match.
 * This is only used when the system is going down because of a fatal error
 * to avoid cluttering the console log with essentially repeated information.
 * In normal processing all errors seen are logged.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
	return m1->bank != m2->bank ||
		m1->status != m2->status ||
		m1->addr != m2->addr ||
		m1->misc != m2->misc;
}
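
/*
 * Illustrative sketch, not part of this header: on the fatal path, code
 * draining the event pool can use mce_cmp() to skip records that merely
 * repeat the previous one. The head/node/prev locals and the pr_emerg()
 * stand-in for real logging are hypothetical; the walk itself matches
 * struct mce_evt_llist above:
 *
 *	struct llist_node *head = mce_gen_pool_prepare_records();
 *	struct mce_evt_llist *node;
 *	struct mce *prev = NULL;
 *
 *	llist_for_each_entry(node, head, llnode) {
 *		if (!prev || mce_cmp(prev, &node->mce))
 *			pr_emerg("bank %d: status 0x%llx\n",
 *				 node->mce.bank, node->mce.status);
 *		prev = &node->mce;
 *	}
 */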

extern struct device_attribute dev_attr_trigger;

#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void) { }
static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif

struct mca_config {
	bool dont_log_ce;
	bool cmci_disabled;
	bool ignore_ce;

	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      __reserved		: 59;

	s8 bootlog;
	int tolerant;
	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;
};

extern struct mca_config mca_cfg;
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca			: 1,

	__reserved_0		: 61;
};

extern struct mce_vendor_flags mce_flags;

struct mca_msr_regs {
	u32 (*ctl)(int bank);
	u32 (*status)(int bank);
	u32 (*addr)(int bank);
	u32 (*misc)(int bank);
};

extern struct mca_msr_regs msr_ops;

/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
#endif

#endif /* __X86_MCE_INTERNAL_H__ */
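
/*
 * Usage note, illustrative rather than normative: the msr_ops accessors
 * exist so common code need not care whether a bank's registers live in
 * the legacy MCA MSR range or the relocated SMCA range. The "bank"
 * variable and handle_valid_record() helper below are hypothetical:
 *
 *	u64 status;
 *
 *	rdmsrl(msr_ops.status(bank), status);
 *	if (status & MCI_STATUS_VAL)
 *		handle_valid_record(bank, status);
 *
 * mce_flags is populated early in CPU init from CPUID feature bits such
 * as X86_FEATURE_OVERFLOW_RECOV, X86_FEATURE_SUCCOR and X86_FEATURE_SMCA.
 */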