121afaf18SBorislav Petkov /* 221afaf18SBorislav Petkov * Machine check handler. 321afaf18SBorislav Petkov * 421afaf18SBorislav Petkov * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 521afaf18SBorislav Petkov * Rest from unknown author(s). 621afaf18SBorislav Petkov * 2004 Andi Kleen. Rewrote most of it. 721afaf18SBorislav Petkov * Copyright 2008 Intel Corporation 821afaf18SBorislav Petkov * Author: Andi Kleen 921afaf18SBorislav Petkov */ 1021afaf18SBorislav Petkov 1121afaf18SBorislav Petkov #include <linux/thread_info.h> 1221afaf18SBorislav Petkov #include <linux/capability.h> 1321afaf18SBorislav Petkov #include <linux/miscdevice.h> 1421afaf18SBorislav Petkov #include <linux/ratelimit.h> 1521afaf18SBorislav Petkov #include <linux/rcupdate.h> 1621afaf18SBorislav Petkov #include <linux/kobject.h> 1721afaf18SBorislav Petkov #include <linux/uaccess.h> 1821afaf18SBorislav Petkov #include <linux/kdebug.h> 1921afaf18SBorislav Petkov #include <linux/kernel.h> 2021afaf18SBorislav Petkov #include <linux/percpu.h> 2121afaf18SBorislav Petkov #include <linux/string.h> 2221afaf18SBorislav Petkov #include <linux/device.h> 2321afaf18SBorislav Petkov #include <linux/syscore_ops.h> 2421afaf18SBorislav Petkov #include <linux/delay.h> 2521afaf18SBorislav Petkov #include <linux/ctype.h> 2621afaf18SBorislav Petkov #include <linux/sched.h> 2721afaf18SBorislav Petkov #include <linux/sysfs.h> 2821afaf18SBorislav Petkov #include <linux/types.h> 2921afaf18SBorislav Petkov #include <linux/slab.h> 3021afaf18SBorislav Petkov #include <linux/init.h> 3121afaf18SBorislav Petkov #include <linux/kmod.h> 3221afaf18SBorislav Petkov #include <linux/poll.h> 3321afaf18SBorislav Petkov #include <linux/nmi.h> 3421afaf18SBorislav Petkov #include <linux/cpu.h> 3521afaf18SBorislav Petkov #include <linux/ras.h> 3621afaf18SBorislav Petkov #include <linux/smp.h> 3721afaf18SBorislav Petkov #include <linux/fs.h> 3821afaf18SBorislav Petkov #include <linux/mm.h> 3921afaf18SBorislav Petkov #include <linux/debugfs.h> 4021afaf18SBorislav Petkov #include <linux/irq_work.h> 4121afaf18SBorislav Petkov #include <linux/export.h> 4221afaf18SBorislav Petkov #include <linux/jump_label.h> 4321afaf18SBorislav Petkov #include <linux/set_memory.h> 4421afaf18SBorislav Petkov 4521afaf18SBorislav Petkov #include <asm/intel-family.h> 4621afaf18SBorislav Petkov #include <asm/processor.h> 4721afaf18SBorislav Petkov #include <asm/traps.h> 4821afaf18SBorislav Petkov #include <asm/tlbflush.h> 4921afaf18SBorislav Petkov #include <asm/mce.h> 5021afaf18SBorislav Petkov #include <asm/msr.h> 5121afaf18SBorislav Petkov #include <asm/reboot.h> 5221afaf18SBorislav Petkov 5321afaf18SBorislav Petkov #include "internal.h" 5421afaf18SBorislav Petkov 5521afaf18SBorislav Petkov static DEFINE_MUTEX(mce_log_mutex); 5621afaf18SBorislav Petkov 5721afaf18SBorislav Petkov /* sysfs synchronization */ 5821afaf18SBorislav Petkov static DEFINE_MUTEX(mce_sysfs_mutex); 5921afaf18SBorislav Petkov 6021afaf18SBorislav Petkov #define CREATE_TRACE_POINTS 6121afaf18SBorislav Petkov #include <trace/events/mce.h> 6221afaf18SBorislav Petkov 6321afaf18SBorislav Petkov #define SPINUNIT 100 /* 100ns */ 6421afaf18SBorislav Petkov 6521afaf18SBorislav Petkov DEFINE_PER_CPU(unsigned, mce_exception_count); 6621afaf18SBorislav Petkov 6721afaf18SBorislav Petkov struct mce_bank *mce_banks __read_mostly; 6821afaf18SBorislav Petkov struct mce_vendor_flags mce_flags __read_mostly; 6921afaf18SBorislav Petkov 7021afaf18SBorislav Petkov struct mca_config mca_cfg __read_mostly = { 7121afaf18SBorislav Petkov .bootlog = -1, 7221afaf18SBorislav Petkov /* 7321afaf18SBorislav Petkov * Tolerant levels: 7421afaf18SBorislav Petkov * 0: always panic on uncorrected errors, log corrected errors 7521afaf18SBorislav Petkov * 1: panic or SIGBUS on uncorrected errors, log corrected errors 7621afaf18SBorislav Petkov * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors 7721afaf18SBorislav Petkov * 3: never panic or SIGBUS, log all errors (for testing only) 7821afaf18SBorislav Petkov */ 7921afaf18SBorislav Petkov .tolerant = 1, 8021afaf18SBorislav Petkov .monarch_timeout = -1 8121afaf18SBorislav Petkov }; 8221afaf18SBorislav Petkov 8321afaf18SBorislav Petkov static DEFINE_PER_CPU(struct mce, mces_seen); 8421afaf18SBorislav Petkov static unsigned long mce_need_notify; 8521afaf18SBorislav Petkov static int cpu_missing; 8621afaf18SBorislav Petkov 8721afaf18SBorislav Petkov /* 8821afaf18SBorislav Petkov * MCA banks polled by the period polling timer for corrected events. 8921afaf18SBorislav Petkov * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). 9021afaf18SBorislav Petkov */ 9121afaf18SBorislav Petkov DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { 9221afaf18SBorislav Petkov [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL 9321afaf18SBorislav Petkov }; 9421afaf18SBorislav Petkov 9521afaf18SBorislav Petkov /* 9621afaf18SBorislav Petkov * MCA banks controlled through firmware first for corrected errors. 9721afaf18SBorislav Petkov * This is a global list of banks for which we won't enable CMCI and we 9821afaf18SBorislav Petkov * won't poll. Firmware controls these banks and is responsible for 9921afaf18SBorislav Petkov * reporting corrected errors through GHES. Uncorrected/recoverable 10021afaf18SBorislav Petkov * errors are still notified through a machine check. 10121afaf18SBorislav Petkov */ 10221afaf18SBorislav Petkov mce_banks_t mce_banks_ce_disabled; 10321afaf18SBorislav Petkov 10421afaf18SBorislav Petkov static struct work_struct mce_work; 10521afaf18SBorislav Petkov static struct irq_work mce_irq_work; 10621afaf18SBorislav Petkov 10721afaf18SBorislav Petkov static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); 10821afaf18SBorislav Petkov 10921afaf18SBorislav Petkov /* 11021afaf18SBorislav Petkov * CPU/chipset specific EDAC code can register a notifier call here to print 11121afaf18SBorislav Petkov * MCE errors in a human-readable form. 11221afaf18SBorislav Petkov */ 11321afaf18SBorislav Petkov BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain); 11421afaf18SBorislav Petkov 11521afaf18SBorislav Petkov /* Do initial initialization of a struct mce */ 11621afaf18SBorislav Petkov void mce_setup(struct mce *m) 11721afaf18SBorislav Petkov { 11821afaf18SBorislav Petkov memset(m, 0, sizeof(struct mce)); 11921afaf18SBorislav Petkov m->cpu = m->extcpu = smp_processor_id(); 12021afaf18SBorislav Petkov /* need the internal __ version to avoid deadlocks */ 12121afaf18SBorislav Petkov m->time = __ktime_get_real_seconds(); 12221afaf18SBorislav Petkov m->cpuvendor = boot_cpu_data.x86_vendor; 12321afaf18SBorislav Petkov m->cpuid = cpuid_eax(1); 12421afaf18SBorislav Petkov m->socketid = cpu_data(m->extcpu).phys_proc_id; 12521afaf18SBorislav Petkov m->apicid = cpu_data(m->extcpu).initial_apicid; 12621afaf18SBorislav Petkov rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); 12721afaf18SBorislav Petkov 12821afaf18SBorislav Petkov if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) 12921afaf18SBorislav Petkov rdmsrl(MSR_PPIN, m->ppin); 13021afaf18SBorislav Petkov 13121afaf18SBorislav Petkov m->microcode = boot_cpu_data.microcode; 13221afaf18SBorislav Petkov } 13321afaf18SBorislav Petkov 13421afaf18SBorislav Petkov DEFINE_PER_CPU(struct mce, injectm); 13521afaf18SBorislav Petkov EXPORT_PER_CPU_SYMBOL_GPL(injectm); 13621afaf18SBorislav Petkov 13721afaf18SBorislav Petkov void mce_log(struct mce *m) 13821afaf18SBorislav Petkov { 13921afaf18SBorislav Petkov if (!mce_gen_pool_add(m)) 14021afaf18SBorislav Petkov irq_work_queue(&mce_irq_work); 14121afaf18SBorislav Petkov } 14221afaf18SBorislav Petkov 14321afaf18SBorislav Petkov void mce_inject_log(struct mce *m) 14421afaf18SBorislav Petkov { 14521afaf18SBorislav Petkov mutex_lock(&mce_log_mutex); 14621afaf18SBorislav Petkov mce_log(m); 14721afaf18SBorislav Petkov mutex_unlock(&mce_log_mutex); 14821afaf18SBorislav Petkov } 14921afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_inject_log); 15021afaf18SBorislav Petkov 15121afaf18SBorislav Petkov static struct notifier_block mce_srao_nb; 15221afaf18SBorislav Petkov 15321afaf18SBorislav Petkov /* 15421afaf18SBorislav Petkov * We run the default notifier if we have only the SRAO, the first and the 15521afaf18SBorislav Petkov * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS 15621afaf18SBorislav Petkov * notifiers registered on the chain. 15721afaf18SBorislav Petkov */ 15821afaf18SBorislav Petkov #define NUM_DEFAULT_NOTIFIERS 3 15921afaf18SBorislav Petkov static atomic_t num_notifiers; 16021afaf18SBorislav Petkov 16121afaf18SBorislav Petkov void mce_register_decode_chain(struct notifier_block *nb) 16221afaf18SBorislav Petkov { 16321afaf18SBorislav Petkov if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC)) 16421afaf18SBorislav Petkov return; 16521afaf18SBorislav Petkov 16621afaf18SBorislav Petkov atomic_inc(&num_notifiers); 16721afaf18SBorislav Petkov 16821afaf18SBorislav Petkov blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); 16921afaf18SBorislav Petkov } 17021afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_register_decode_chain); 17121afaf18SBorislav Petkov 17221afaf18SBorislav Petkov void mce_unregister_decode_chain(struct notifier_block *nb) 17321afaf18SBorislav Petkov { 17421afaf18SBorislav Petkov atomic_dec(&num_notifiers); 17521afaf18SBorislav Petkov 17621afaf18SBorislav Petkov blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb); 17721afaf18SBorislav Petkov } 17821afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); 17921afaf18SBorislav Petkov 18021afaf18SBorislav Petkov static inline u32 ctl_reg(int bank) 18121afaf18SBorislav Petkov { 18221afaf18SBorislav Petkov return MSR_IA32_MCx_CTL(bank); 18321afaf18SBorislav Petkov } 18421afaf18SBorislav Petkov 18521afaf18SBorislav Petkov static inline u32 status_reg(int bank) 18621afaf18SBorislav Petkov { 18721afaf18SBorislav Petkov return MSR_IA32_MCx_STATUS(bank); 18821afaf18SBorislav Petkov } 18921afaf18SBorislav Petkov 19021afaf18SBorislav Petkov static inline u32 addr_reg(int bank) 19121afaf18SBorislav Petkov { 19221afaf18SBorislav Petkov return MSR_IA32_MCx_ADDR(bank); 19321afaf18SBorislav Petkov } 19421afaf18SBorislav Petkov 19521afaf18SBorislav Petkov static inline u32 misc_reg(int bank) 19621afaf18SBorislav Petkov { 19721afaf18SBorislav Petkov return MSR_IA32_MCx_MISC(bank); 19821afaf18SBorislav Petkov } 19921afaf18SBorislav Petkov 20021afaf18SBorislav Petkov static inline u32 smca_ctl_reg(int bank) 20121afaf18SBorislav Petkov { 20221afaf18SBorislav Petkov return MSR_AMD64_SMCA_MCx_CTL(bank); 20321afaf18SBorislav Petkov } 20421afaf18SBorislav Petkov 20521afaf18SBorislav Petkov static inline u32 smca_status_reg(int bank) 20621afaf18SBorislav Petkov { 20721afaf18SBorislav Petkov return MSR_AMD64_SMCA_MCx_STATUS(bank); 20821afaf18SBorislav Petkov } 20921afaf18SBorislav Petkov 21021afaf18SBorislav Petkov static inline u32 smca_addr_reg(int bank) 21121afaf18SBorislav Petkov { 21221afaf18SBorislav Petkov return MSR_AMD64_SMCA_MCx_ADDR(bank); 21321afaf18SBorislav Petkov } 21421afaf18SBorislav Petkov 21521afaf18SBorislav Petkov static inline u32 smca_misc_reg(int bank) 21621afaf18SBorislav Petkov { 21721afaf18SBorislav Petkov return MSR_AMD64_SMCA_MCx_MISC(bank); 21821afaf18SBorislav Petkov } 21921afaf18SBorislav Petkov 22021afaf18SBorislav Petkov struct mca_msr_regs msr_ops = { 22121afaf18SBorislav Petkov .ctl = ctl_reg, 22221afaf18SBorislav Petkov .status = status_reg, 22321afaf18SBorislav Petkov .addr = addr_reg, 22421afaf18SBorislav Petkov .misc = misc_reg 22521afaf18SBorislav Petkov }; 22621afaf18SBorislav Petkov 22721afaf18SBorislav Petkov static void __print_mce(struct mce *m) 22821afaf18SBorislav Petkov { 22921afaf18SBorislav Petkov pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", 23021afaf18SBorislav Petkov m->extcpu, 23121afaf18SBorislav Petkov (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""), 23221afaf18SBorislav Petkov m->mcgstatus, m->bank, m->status); 23321afaf18SBorislav Petkov 23421afaf18SBorislav Petkov if (m->ip) { 23521afaf18SBorislav Petkov pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ", 23621afaf18SBorislav Petkov !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 23721afaf18SBorislav Petkov m->cs, m->ip); 23821afaf18SBorislav Petkov 23921afaf18SBorislav Petkov if (m->cs == __KERNEL_CS) 24021afaf18SBorislav Petkov pr_cont("{%pS}", (void *)(unsigned long)m->ip); 24121afaf18SBorislav Petkov pr_cont("\n"); 24221afaf18SBorislav Petkov } 24321afaf18SBorislav Petkov 24421afaf18SBorislav Petkov pr_emerg(HW_ERR "TSC %llx ", m->tsc); 24521afaf18SBorislav Petkov if (m->addr) 24621afaf18SBorislav Petkov pr_cont("ADDR %llx ", m->addr); 24721afaf18SBorislav Petkov if (m->misc) 24821afaf18SBorislav Petkov pr_cont("MISC %llx ", m->misc); 24921afaf18SBorislav Petkov 25021afaf18SBorislav Petkov if (mce_flags.smca) { 25121afaf18SBorislav Petkov if (m->synd) 25221afaf18SBorislav Petkov pr_cont("SYND %llx ", m->synd); 25321afaf18SBorislav Petkov if (m->ipid) 25421afaf18SBorislav Petkov pr_cont("IPID %llx ", m->ipid); 25521afaf18SBorislav Petkov } 25621afaf18SBorislav Petkov 25721afaf18SBorislav Petkov pr_cont("\n"); 25821afaf18SBorislav Petkov /* 25921afaf18SBorislav Petkov * Note this output is parsed by external tools and old fields 26021afaf18SBorislav Petkov * should not be changed. 26121afaf18SBorislav Petkov */ 26221afaf18SBorislav Petkov pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", 26321afaf18SBorislav Petkov m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, 26421afaf18SBorislav Petkov m->microcode); 26521afaf18SBorislav Petkov } 26621afaf18SBorislav Petkov 26721afaf18SBorislav Petkov static void print_mce(struct mce *m) 26821afaf18SBorislav Petkov { 26921afaf18SBorislav Petkov __print_mce(m); 27021afaf18SBorislav Petkov 27121afaf18SBorislav Petkov if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON) 27221afaf18SBorislav Petkov pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); 27321afaf18SBorislav Petkov } 27421afaf18SBorislav Petkov 27521afaf18SBorislav Petkov #define PANIC_TIMEOUT 5 /* 5 seconds */ 27621afaf18SBorislav Petkov 27721afaf18SBorislav Petkov static atomic_t mce_panicked; 27821afaf18SBorislav Petkov 27921afaf18SBorislav Petkov static int fake_panic; 28021afaf18SBorislav Petkov static atomic_t mce_fake_panicked; 28121afaf18SBorislav Petkov 28221afaf18SBorislav Petkov /* Panic in progress. Enable interrupts and wait for final IPI */ 28321afaf18SBorislav Petkov static void wait_for_panic(void) 28421afaf18SBorislav Petkov { 28521afaf18SBorislav Petkov long timeout = PANIC_TIMEOUT*USEC_PER_SEC; 28621afaf18SBorislav Petkov 28721afaf18SBorislav Petkov preempt_disable(); 28821afaf18SBorislav Petkov local_irq_enable(); 28921afaf18SBorislav Petkov while (timeout-- > 0) 29021afaf18SBorislav Petkov udelay(1); 29121afaf18SBorislav Petkov if (panic_timeout == 0) 29221afaf18SBorislav Petkov panic_timeout = mca_cfg.panic_timeout; 29321afaf18SBorislav Petkov panic("Panicing machine check CPU died"); 29421afaf18SBorislav Petkov } 29521afaf18SBorislav Petkov 29621afaf18SBorislav Petkov static void mce_panic(const char *msg, struct mce *final, char *exp) 29721afaf18SBorislav Petkov { 29821afaf18SBorislav Petkov int apei_err = 0; 29921afaf18SBorislav Petkov struct llist_node *pending; 30021afaf18SBorislav Petkov struct mce_evt_llist *l; 30121afaf18SBorislav Petkov 30221afaf18SBorislav Petkov if (!fake_panic) { 30321afaf18SBorislav Petkov /* 30421afaf18SBorislav Petkov * Make sure only one CPU runs in machine check panic 30521afaf18SBorislav Petkov */ 30621afaf18SBorislav Petkov if (atomic_inc_return(&mce_panicked) > 1) 30721afaf18SBorislav Petkov wait_for_panic(); 30821afaf18SBorislav Petkov barrier(); 30921afaf18SBorislav Petkov 31021afaf18SBorislav Petkov bust_spinlocks(1); 31121afaf18SBorislav Petkov console_verbose(); 31221afaf18SBorislav Petkov } else { 31321afaf18SBorislav Petkov /* Don't log too much for fake panic */ 31421afaf18SBorislav Petkov if (atomic_inc_return(&mce_fake_panicked) > 1) 31521afaf18SBorislav Petkov return; 31621afaf18SBorislav Petkov } 31721afaf18SBorislav Petkov pending = mce_gen_pool_prepare_records(); 31821afaf18SBorislav Petkov /* First print corrected ones that are still unlogged */ 31921afaf18SBorislav Petkov llist_for_each_entry(l, pending, llnode) { 32021afaf18SBorislav Petkov struct mce *m = &l->mce; 32121afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_UC)) { 32221afaf18SBorislav Petkov print_mce(m); 32321afaf18SBorislav Petkov if (!apei_err) 32421afaf18SBorislav Petkov apei_err = apei_write_mce(m); 32521afaf18SBorislav Petkov } 32621afaf18SBorislav Petkov } 32721afaf18SBorislav Petkov /* Now print uncorrected but with the final one last */ 32821afaf18SBorislav Petkov llist_for_each_entry(l, pending, llnode) { 32921afaf18SBorislav Petkov struct mce *m = &l->mce; 33021afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_UC)) 33121afaf18SBorislav Petkov continue; 33221afaf18SBorislav Petkov if (!final || mce_cmp(m, final)) { 33321afaf18SBorislav Petkov print_mce(m); 33421afaf18SBorislav Petkov if (!apei_err) 33521afaf18SBorislav Petkov apei_err = apei_write_mce(m); 33621afaf18SBorislav Petkov } 33721afaf18SBorislav Petkov } 33821afaf18SBorislav Petkov if (final) { 33921afaf18SBorislav Petkov print_mce(final); 34021afaf18SBorislav Petkov if (!apei_err) 34121afaf18SBorislav Petkov apei_err = apei_write_mce(final); 34221afaf18SBorislav Petkov } 34321afaf18SBorislav Petkov if (cpu_missing) 34421afaf18SBorislav Petkov pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n"); 34521afaf18SBorislav Petkov if (exp) 34621afaf18SBorislav Petkov pr_emerg(HW_ERR "Machine check: %s\n", exp); 34721afaf18SBorislav Petkov if (!fake_panic) { 34821afaf18SBorislav Petkov if (panic_timeout == 0) 34921afaf18SBorislav Petkov panic_timeout = mca_cfg.panic_timeout; 35021afaf18SBorislav Petkov panic(msg); 35121afaf18SBorislav Petkov } else 35221afaf18SBorislav Petkov pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); 35321afaf18SBorislav Petkov } 35421afaf18SBorislav Petkov 35521afaf18SBorislav Petkov /* Support code for software error injection */ 35621afaf18SBorislav Petkov 35721afaf18SBorislav Petkov static int msr_to_offset(u32 msr) 35821afaf18SBorislav Petkov { 35921afaf18SBorislav Petkov unsigned bank = __this_cpu_read(injectm.bank); 36021afaf18SBorislav Petkov 36121afaf18SBorislav Petkov if (msr == mca_cfg.rip_msr) 36221afaf18SBorislav Petkov return offsetof(struct mce, ip); 36321afaf18SBorislav Petkov if (msr == msr_ops.status(bank)) 36421afaf18SBorislav Petkov return offsetof(struct mce, status); 36521afaf18SBorislav Petkov if (msr == msr_ops.addr(bank)) 36621afaf18SBorislav Petkov return offsetof(struct mce, addr); 36721afaf18SBorislav Petkov if (msr == msr_ops.misc(bank)) 36821afaf18SBorislav Petkov return offsetof(struct mce, misc); 36921afaf18SBorislav Petkov if (msr == MSR_IA32_MCG_STATUS) 37021afaf18SBorislav Petkov return offsetof(struct mce, mcgstatus); 37121afaf18SBorislav Petkov return -1; 37221afaf18SBorislav Petkov } 37321afaf18SBorislav Petkov 37421afaf18SBorislav Petkov /* MSR access wrappers used for error injection */ 37521afaf18SBorislav Petkov static u64 mce_rdmsrl(u32 msr) 37621afaf18SBorislav Petkov { 37721afaf18SBorislav Petkov u64 v; 37821afaf18SBorislav Petkov 37921afaf18SBorislav Petkov if (__this_cpu_read(injectm.finished)) { 38021afaf18SBorislav Petkov int offset = msr_to_offset(msr); 38121afaf18SBorislav Petkov 38221afaf18SBorislav Petkov if (offset < 0) 38321afaf18SBorislav Petkov return 0; 38421afaf18SBorislav Petkov return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); 38521afaf18SBorislav Petkov } 38621afaf18SBorislav Petkov 38721afaf18SBorislav Petkov if (rdmsrl_safe(msr, &v)) { 38821afaf18SBorislav Petkov WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr); 38921afaf18SBorislav Petkov /* 39021afaf18SBorislav Petkov * Return zero in case the access faulted. This should 39121afaf18SBorislav Petkov * not happen normally but can happen if the CPU does 39221afaf18SBorislav Petkov * something weird, or if the code is buggy. 39321afaf18SBorislav Petkov */ 39421afaf18SBorislav Petkov v = 0; 39521afaf18SBorislav Petkov } 39621afaf18SBorislav Petkov 39721afaf18SBorislav Petkov return v; 39821afaf18SBorislav Petkov } 39921afaf18SBorislav Petkov 40021afaf18SBorislav Petkov static void mce_wrmsrl(u32 msr, u64 v) 40121afaf18SBorislav Petkov { 40221afaf18SBorislav Petkov if (__this_cpu_read(injectm.finished)) { 40321afaf18SBorislav Petkov int offset = msr_to_offset(msr); 40421afaf18SBorislav Petkov 40521afaf18SBorislav Petkov if (offset >= 0) 40621afaf18SBorislav Petkov *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; 40721afaf18SBorislav Petkov return; 40821afaf18SBorislav Petkov } 40921afaf18SBorislav Petkov wrmsrl(msr, v); 41021afaf18SBorislav Petkov } 41121afaf18SBorislav Petkov 41221afaf18SBorislav Petkov /* 41321afaf18SBorislav Petkov * Collect all global (w.r.t. this processor) status about this machine 41421afaf18SBorislav Petkov * check into our "mce" struct so that we can use it later to assess 41521afaf18SBorislav Petkov * the severity of the problem as we read per-bank specific details. 41621afaf18SBorislav Petkov */ 41721afaf18SBorislav Petkov static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) 41821afaf18SBorislav Petkov { 41921afaf18SBorislav Petkov mce_setup(m); 42021afaf18SBorislav Petkov 42121afaf18SBorislav Petkov m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); 42221afaf18SBorislav Petkov if (regs) { 42321afaf18SBorislav Petkov /* 42421afaf18SBorislav Petkov * Get the address of the instruction at the time of 42521afaf18SBorislav Petkov * the machine check error. 42621afaf18SBorislav Petkov */ 42721afaf18SBorislav Petkov if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) { 42821afaf18SBorislav Petkov m->ip = regs->ip; 42921afaf18SBorislav Petkov m->cs = regs->cs; 43021afaf18SBorislav Petkov 43121afaf18SBorislav Petkov /* 43221afaf18SBorislav Petkov * When in VM86 mode make the cs look like ring 3 43321afaf18SBorislav Petkov * always. This is a lie, but it's better than passing 43421afaf18SBorislav Petkov * the additional vm86 bit around everywhere. 43521afaf18SBorislav Petkov */ 43621afaf18SBorislav Petkov if (v8086_mode(regs)) 43721afaf18SBorislav Petkov m->cs |= 3; 43821afaf18SBorislav Petkov } 43921afaf18SBorislav Petkov /* Use accurate RIP reporting if available. */ 44021afaf18SBorislav Petkov if (mca_cfg.rip_msr) 44121afaf18SBorislav Petkov m->ip = mce_rdmsrl(mca_cfg.rip_msr); 44221afaf18SBorislav Petkov } 44321afaf18SBorislav Petkov } 44421afaf18SBorislav Petkov 44521afaf18SBorislav Petkov int mce_available(struct cpuinfo_x86 *c) 44621afaf18SBorislav Petkov { 44721afaf18SBorislav Petkov if (mca_cfg.disabled) 44821afaf18SBorislav Petkov return 0; 44921afaf18SBorislav Petkov return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); 45021afaf18SBorislav Petkov } 45121afaf18SBorislav Petkov 45221afaf18SBorislav Petkov static void mce_schedule_work(void) 45321afaf18SBorislav Petkov { 45421afaf18SBorislav Petkov if (!mce_gen_pool_empty()) 45521afaf18SBorislav Petkov schedule_work(&mce_work); 45621afaf18SBorislav Petkov } 45721afaf18SBorislav Petkov 45821afaf18SBorislav Petkov static void mce_irq_work_cb(struct irq_work *entry) 45921afaf18SBorislav Petkov { 46021afaf18SBorislav Petkov mce_schedule_work(); 46121afaf18SBorislav Petkov } 46221afaf18SBorislav Petkov 46321afaf18SBorislav Petkov static void mce_report_event(struct pt_regs *regs) 46421afaf18SBorislav Petkov { 46521afaf18SBorislav Petkov if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { 46621afaf18SBorislav Petkov mce_notify_irq(); 46721afaf18SBorislav Petkov /* 46821afaf18SBorislav Petkov * Triggering the work queue here is just an insurance 46921afaf18SBorislav Petkov * policy in case the syscall exit notify handler 47021afaf18SBorislav Petkov * doesn't run soon enough or ends up running on the 47121afaf18SBorislav Petkov * wrong CPU (can happen when audit sleeps) 47221afaf18SBorislav Petkov */ 47321afaf18SBorislav Petkov mce_schedule_work(); 47421afaf18SBorislav Petkov return; 47521afaf18SBorislav Petkov } 47621afaf18SBorislav Petkov 47721afaf18SBorislav Petkov irq_work_queue(&mce_irq_work); 47821afaf18SBorislav Petkov } 47921afaf18SBorislav Petkov 48021afaf18SBorislav Petkov /* 48121afaf18SBorislav Petkov * Check if the address reported by the CPU is in a format we can parse. 48221afaf18SBorislav Petkov * It would be possible to add code for most other cases, but all would 48321afaf18SBorislav Petkov * be somewhat complicated (e.g. segment offset would require an instruction 48421afaf18SBorislav Petkov * parser). So only support physical addresses up to page granuality for now. 48521afaf18SBorislav Petkov */ 48621afaf18SBorislav Petkov int mce_usable_address(struct mce *m) 48721afaf18SBorislav Petkov { 48821afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_ADDRV)) 48921afaf18SBorislav Petkov return 0; 49021afaf18SBorislav Petkov 49121afaf18SBorislav Petkov /* Checks after this one are Intel-specific: */ 49221afaf18SBorislav Petkov if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 49321afaf18SBorislav Petkov return 1; 49421afaf18SBorislav Petkov 49521afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_MISCV)) 49621afaf18SBorislav Petkov return 0; 49721afaf18SBorislav Petkov 49821afaf18SBorislav Petkov if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT) 49921afaf18SBorislav Petkov return 0; 50021afaf18SBorislav Petkov 50121afaf18SBorislav Petkov if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS) 50221afaf18SBorislav Petkov return 0; 50321afaf18SBorislav Petkov 50421afaf18SBorislav Petkov return 1; 50521afaf18SBorislav Petkov } 50621afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_usable_address); 50721afaf18SBorislav Petkov 50821afaf18SBorislav Petkov bool mce_is_memory_error(struct mce *m) 50921afaf18SBorislav Petkov { 51021afaf18SBorislav Petkov if (m->cpuvendor == X86_VENDOR_AMD || 51121afaf18SBorislav Petkov m->cpuvendor == X86_VENDOR_HYGON) { 51221afaf18SBorislav Petkov return amd_mce_is_memory_error(m); 51321afaf18SBorislav Petkov } else if (m->cpuvendor == X86_VENDOR_INTEL) { 51421afaf18SBorislav Petkov /* 51521afaf18SBorislav Petkov * Intel SDM Volume 3B - 15.9.2 Compound Error Codes 51621afaf18SBorislav Petkov * 51721afaf18SBorislav Petkov * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for 51821afaf18SBorislav Petkov * indicating a memory error. Bit 8 is used for indicating a 51921afaf18SBorislav Petkov * cache hierarchy error. The combination of bit 2 and bit 3 52021afaf18SBorislav Petkov * is used for indicating a `generic' cache hierarchy error 52121afaf18SBorislav Petkov * But we can't just blindly check the above bits, because if 52221afaf18SBorislav Petkov * bit 11 is set, then it is a bus/interconnect error - and 52321afaf18SBorislav Petkov * either way the above bits just gives more detail on what 52421afaf18SBorislav Petkov * bus/interconnect error happened. Note that bit 12 can be 52521afaf18SBorislav Petkov * ignored, as it's the "filter" bit. 52621afaf18SBorislav Petkov */ 52721afaf18SBorislav Petkov return (m->status & 0xef80) == BIT(7) || 52821afaf18SBorislav Petkov (m->status & 0xef00) == BIT(8) || 52921afaf18SBorislav Petkov (m->status & 0xeffc) == 0xc; 53021afaf18SBorislav Petkov } 53121afaf18SBorislav Petkov 53221afaf18SBorislav Petkov return false; 53321afaf18SBorislav Petkov } 53421afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_is_memory_error); 53521afaf18SBorislav Petkov 53621afaf18SBorislav Petkov bool mce_is_correctable(struct mce *m) 53721afaf18SBorislav Petkov { 53821afaf18SBorislav Petkov if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) 53921afaf18SBorislav Petkov return false; 54021afaf18SBorislav Petkov 54121afaf18SBorislav Petkov if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED) 54221afaf18SBorislav Petkov return false; 54321afaf18SBorislav Petkov 54421afaf18SBorislav Petkov if (m->status & MCI_STATUS_UC) 54521afaf18SBorislav Petkov return false; 54621afaf18SBorislav Petkov 54721afaf18SBorislav Petkov return true; 54821afaf18SBorislav Petkov } 54921afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_is_correctable); 55021afaf18SBorislav Petkov 55121afaf18SBorislav Petkov static bool cec_add_mce(struct mce *m) 55221afaf18SBorislav Petkov { 55321afaf18SBorislav Petkov if (!m) 55421afaf18SBorislav Petkov return false; 55521afaf18SBorislav Petkov 55621afaf18SBorislav Petkov /* We eat only correctable DRAM errors with usable addresses. */ 55721afaf18SBorislav Petkov if (mce_is_memory_error(m) && 55821afaf18SBorislav Petkov mce_is_correctable(m) && 55921afaf18SBorislav Petkov mce_usable_address(m)) 56021afaf18SBorislav Petkov if (!cec_add_elem(m->addr >> PAGE_SHIFT)) 56121afaf18SBorislav Petkov return true; 56221afaf18SBorislav Petkov 56321afaf18SBorislav Petkov return false; 56421afaf18SBorislav Petkov } 56521afaf18SBorislav Petkov 56621afaf18SBorislav Petkov static int mce_first_notifier(struct notifier_block *nb, unsigned long val, 56721afaf18SBorislav Petkov void *data) 56821afaf18SBorislav Petkov { 56921afaf18SBorislav Petkov struct mce *m = (struct mce *)data; 57021afaf18SBorislav Petkov 57121afaf18SBorislav Petkov if (!m) 57221afaf18SBorislav Petkov return NOTIFY_DONE; 57321afaf18SBorislav Petkov 57421afaf18SBorislav Petkov if (cec_add_mce(m)) 57521afaf18SBorislav Petkov return NOTIFY_STOP; 57621afaf18SBorislav Petkov 57721afaf18SBorislav Petkov /* Emit the trace record: */ 57821afaf18SBorislav Petkov trace_mce_record(m); 57921afaf18SBorislav Petkov 58021afaf18SBorislav Petkov set_bit(0, &mce_need_notify); 58121afaf18SBorislav Petkov 58221afaf18SBorislav Petkov mce_notify_irq(); 58321afaf18SBorislav Petkov 58421afaf18SBorislav Petkov return NOTIFY_DONE; 58521afaf18SBorislav Petkov } 58621afaf18SBorislav Petkov 58721afaf18SBorislav Petkov static struct notifier_block first_nb = { 58821afaf18SBorislav Petkov .notifier_call = mce_first_notifier, 58921afaf18SBorislav Petkov .priority = MCE_PRIO_FIRST, 59021afaf18SBorislav Petkov }; 59121afaf18SBorislav Petkov 59221afaf18SBorislav Petkov static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, 59321afaf18SBorislav Petkov void *data) 59421afaf18SBorislav Petkov { 59521afaf18SBorislav Petkov struct mce *mce = (struct mce *)data; 59621afaf18SBorislav Petkov unsigned long pfn; 59721afaf18SBorislav Petkov 59821afaf18SBorislav Petkov if (!mce) 59921afaf18SBorislav Petkov return NOTIFY_DONE; 60021afaf18SBorislav Petkov 60121afaf18SBorislav Petkov if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { 60221afaf18SBorislav Petkov pfn = mce->addr >> PAGE_SHIFT; 60321afaf18SBorislav Petkov if (!memory_failure(pfn, 0)) 60421afaf18SBorislav Petkov set_mce_nospec(pfn); 60521afaf18SBorislav Petkov } 60621afaf18SBorislav Petkov 60721afaf18SBorislav Petkov return NOTIFY_OK; 60821afaf18SBorislav Petkov } 60921afaf18SBorislav Petkov static struct notifier_block mce_srao_nb = { 61021afaf18SBorislav Petkov .notifier_call = srao_decode_notifier, 61121afaf18SBorislav Petkov .priority = MCE_PRIO_SRAO, 61221afaf18SBorislav Petkov }; 61321afaf18SBorislav Petkov 61421afaf18SBorislav Petkov static int mce_default_notifier(struct notifier_block *nb, unsigned long val, 61521afaf18SBorislav Petkov void *data) 61621afaf18SBorislav Petkov { 61721afaf18SBorislav Petkov struct mce *m = (struct mce *)data; 61821afaf18SBorislav Petkov 61921afaf18SBorislav Petkov if (!m) 62021afaf18SBorislav Petkov return NOTIFY_DONE; 62121afaf18SBorislav Petkov 62221afaf18SBorislav Petkov if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS) 62321afaf18SBorislav Petkov return NOTIFY_DONE; 62421afaf18SBorislav Petkov 62521afaf18SBorislav Petkov __print_mce(m); 62621afaf18SBorislav Petkov 62721afaf18SBorislav Petkov return NOTIFY_DONE; 62821afaf18SBorislav Petkov } 62921afaf18SBorislav Petkov 63021afaf18SBorislav Petkov static struct notifier_block mce_default_nb = { 63121afaf18SBorislav Petkov .notifier_call = mce_default_notifier, 63221afaf18SBorislav Petkov /* lowest prio, we want it to run last. */ 63321afaf18SBorislav Petkov .priority = MCE_PRIO_LOWEST, 63421afaf18SBorislav Petkov }; 63521afaf18SBorislav Petkov 63621afaf18SBorislav Petkov /* 63721afaf18SBorislav Petkov * Read ADDR and MISC registers. 63821afaf18SBorislav Petkov */ 63921afaf18SBorislav Petkov static void mce_read_aux(struct mce *m, int i) 64021afaf18SBorislav Petkov { 64121afaf18SBorislav Petkov if (m->status & MCI_STATUS_MISCV) 64221afaf18SBorislav Petkov m->misc = mce_rdmsrl(msr_ops.misc(i)); 64321afaf18SBorislav Petkov 64421afaf18SBorislav Petkov if (m->status & MCI_STATUS_ADDRV) { 64521afaf18SBorislav Petkov m->addr = mce_rdmsrl(msr_ops.addr(i)); 64621afaf18SBorislav Petkov 64721afaf18SBorislav Petkov /* 64821afaf18SBorislav Petkov * Mask the reported address by the reported granularity. 64921afaf18SBorislav Petkov */ 65021afaf18SBorislav Petkov if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) { 65121afaf18SBorislav Petkov u8 shift = MCI_MISC_ADDR_LSB(m->misc); 65221afaf18SBorislav Petkov m->addr >>= shift; 65321afaf18SBorislav Petkov m->addr <<= shift; 65421afaf18SBorislav Petkov } 65521afaf18SBorislav Petkov 65621afaf18SBorislav Petkov /* 65721afaf18SBorislav Petkov * Extract [55:<lsb>] where lsb is the least significant 65821afaf18SBorislav Petkov * *valid* bit of the address bits. 65921afaf18SBorislav Petkov */ 66021afaf18SBorislav Petkov if (mce_flags.smca) { 66121afaf18SBorislav Petkov u8 lsb = (m->addr >> 56) & 0x3f; 66221afaf18SBorislav Petkov 66321afaf18SBorislav Petkov m->addr &= GENMASK_ULL(55, lsb); 66421afaf18SBorislav Petkov } 66521afaf18SBorislav Petkov } 66621afaf18SBorislav Petkov 66721afaf18SBorislav Petkov if (mce_flags.smca) { 66821afaf18SBorislav Petkov m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i)); 66921afaf18SBorislav Petkov 67021afaf18SBorislav Petkov if (m->status & MCI_STATUS_SYNDV) 67121afaf18SBorislav Petkov m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i)); 67221afaf18SBorislav Petkov } 67321afaf18SBorislav Petkov } 67421afaf18SBorislav Petkov 67521afaf18SBorislav Petkov DEFINE_PER_CPU(unsigned, mce_poll_count); 67621afaf18SBorislav Petkov 67721afaf18SBorislav Petkov /* 67821afaf18SBorislav Petkov * Poll for corrected events or events that happened before reset. 67921afaf18SBorislav Petkov * Those are just logged through /dev/mcelog. 68021afaf18SBorislav Petkov * 68121afaf18SBorislav Petkov * This is executed in standard interrupt context. 68221afaf18SBorislav Petkov * 68321afaf18SBorislav Petkov * Note: spec recommends to panic for fatal unsignalled 68421afaf18SBorislav Petkov * errors here. However this would be quite problematic -- 68521afaf18SBorislav Petkov * we would need to reimplement the Monarch handling and 68621afaf18SBorislav Petkov * it would mess up the exclusion between exception handler 687312a4661SLinus Torvalds * and poll handler -- * so we skip this for now. 68821afaf18SBorislav Petkov * These cases should not happen anyways, or only when the CPU 68921afaf18SBorislav Petkov * is already totally * confused. In this case it's likely it will 69021afaf18SBorislav Petkov * not fully execute the machine check handler either. 69121afaf18SBorislav Petkov */ 69221afaf18SBorislav Petkov bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) 69321afaf18SBorislav Petkov { 69421afaf18SBorislav Petkov bool error_seen = false; 69521afaf18SBorislav Petkov struct mce m; 69621afaf18SBorislav Petkov int i; 69721afaf18SBorislav Petkov 69821afaf18SBorislav Petkov this_cpu_inc(mce_poll_count); 69921afaf18SBorislav Petkov 70021afaf18SBorislav Petkov mce_gather_info(&m, NULL); 70121afaf18SBorislav Petkov 70221afaf18SBorislav Petkov if (flags & MCP_TIMESTAMP) 70321afaf18SBorislav Petkov m.tsc = rdtsc(); 70421afaf18SBorislav Petkov 70521afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 70621afaf18SBorislav Petkov if (!mce_banks[i].ctl || !test_bit(i, *b)) 70721afaf18SBorislav Petkov continue; 70821afaf18SBorislav Petkov 70921afaf18SBorislav Petkov m.misc = 0; 71021afaf18SBorislav Petkov m.addr = 0; 71121afaf18SBorislav Petkov m.bank = i; 71221afaf18SBorislav Petkov 71321afaf18SBorislav Petkov barrier(); 71421afaf18SBorislav Petkov m.status = mce_rdmsrl(msr_ops.status(i)); 71521afaf18SBorislav Petkov if (!(m.status & MCI_STATUS_VAL)) 71621afaf18SBorislav Petkov continue; 71721afaf18SBorislav Petkov 71821afaf18SBorislav Petkov /* 71921afaf18SBorislav Petkov * Uncorrected or signalled events are handled by the exception 72021afaf18SBorislav Petkov * handler when it is enabled, so don't process those here. 72121afaf18SBorislav Petkov * 72221afaf18SBorislav Petkov * TBD do the same check for MCI_STATUS_EN here? 72321afaf18SBorislav Petkov */ 72421afaf18SBorislav Petkov if (!(flags & MCP_UC) && 72521afaf18SBorislav Petkov (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC))) 72621afaf18SBorislav Petkov continue; 72721afaf18SBorislav Petkov 72821afaf18SBorislav Petkov error_seen = true; 72921afaf18SBorislav Petkov 73021afaf18SBorislav Petkov mce_read_aux(&m, i); 73121afaf18SBorislav Petkov 73221afaf18SBorislav Petkov m.severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); 73321afaf18SBorislav Petkov 73421afaf18SBorislav Petkov /* 73521afaf18SBorislav Petkov * Don't get the IP here because it's unlikely to 73621afaf18SBorislav Petkov * have anything to do with the actual error location. 73721afaf18SBorislav Petkov */ 73821afaf18SBorislav Petkov if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) 73921afaf18SBorislav Petkov mce_log(&m); 74021afaf18SBorislav Petkov else if (mce_usable_address(&m)) { 74121afaf18SBorislav Petkov /* 74221afaf18SBorislav Petkov * Although we skipped logging this, we still want 74321afaf18SBorislav Petkov * to take action. Add to the pool so the registered 74421afaf18SBorislav Petkov * notifiers will see it. 74521afaf18SBorislav Petkov */ 74621afaf18SBorislav Petkov if (!mce_gen_pool_add(&m)) 74721afaf18SBorislav Petkov mce_schedule_work(); 74821afaf18SBorislav Petkov } 74921afaf18SBorislav Petkov 75021afaf18SBorislav Petkov /* 75121afaf18SBorislav Petkov * Clear state for this bank. 75221afaf18SBorislav Petkov */ 75321afaf18SBorislav Petkov mce_wrmsrl(msr_ops.status(i), 0); 75421afaf18SBorislav Petkov } 75521afaf18SBorislav Petkov 75621afaf18SBorislav Petkov /* 75721afaf18SBorislav Petkov * Don't clear MCG_STATUS here because it's only defined for 75821afaf18SBorislav Petkov * exceptions. 75921afaf18SBorislav Petkov */ 76021afaf18SBorislav Petkov 76121afaf18SBorislav Petkov sync_core(); 76221afaf18SBorislav Petkov 76321afaf18SBorislav Petkov return error_seen; 76421afaf18SBorislav Petkov } 76521afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(machine_check_poll); 76621afaf18SBorislav Petkov 76721afaf18SBorislav Petkov /* 76821afaf18SBorislav Petkov * Do a quick check if any of the events requires a panic. 76921afaf18SBorislav Petkov * This decides if we keep the events around or clear them. 77021afaf18SBorislav Petkov */ 77121afaf18SBorislav Petkov static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, 77221afaf18SBorislav Petkov struct pt_regs *regs) 77321afaf18SBorislav Petkov { 77421afaf18SBorislav Petkov char *tmp; 77521afaf18SBorislav Petkov int i; 77621afaf18SBorislav Petkov 77721afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 77821afaf18SBorislav Petkov m->status = mce_rdmsrl(msr_ops.status(i)); 77921afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_VAL)) 78021afaf18SBorislav Petkov continue; 78121afaf18SBorislav Petkov 78221afaf18SBorislav Petkov __set_bit(i, validp); 78321afaf18SBorislav Petkov if (quirk_no_way_out) 78421afaf18SBorislav Petkov quirk_no_way_out(i, m, regs); 78521afaf18SBorislav Petkov 78621afaf18SBorislav Petkov if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 78721afaf18SBorislav Petkov mce_read_aux(m, i); 78821afaf18SBorislav Petkov *msg = tmp; 78921afaf18SBorislav Petkov return 1; 79021afaf18SBorislav Petkov } 79121afaf18SBorislav Petkov } 79221afaf18SBorislav Petkov return 0; 79321afaf18SBorislav Petkov } 79421afaf18SBorislav Petkov 79521afaf18SBorislav Petkov /* 79621afaf18SBorislav Petkov * Variable to establish order between CPUs while scanning. 79721afaf18SBorislav Petkov * Each CPU spins initially until executing is equal its number. 79821afaf18SBorislav Petkov */ 79921afaf18SBorislav Petkov static atomic_t mce_executing; 80021afaf18SBorislav Petkov 80121afaf18SBorislav Petkov /* 80221afaf18SBorislav Petkov * Defines order of CPUs on entry. First CPU becomes Monarch. 80321afaf18SBorislav Petkov */ 80421afaf18SBorislav Petkov static atomic_t mce_callin; 80521afaf18SBorislav Petkov 80621afaf18SBorislav Petkov /* 80721afaf18SBorislav Petkov * Check if a timeout waiting for other CPUs happened. 80821afaf18SBorislav Petkov */ 80921afaf18SBorislav Petkov static int mce_timed_out(u64 *t, const char *msg) 81021afaf18SBorislav Petkov { 81121afaf18SBorislav Petkov /* 81221afaf18SBorislav Petkov * The others already did panic for some reason. 81321afaf18SBorislav Petkov * Bail out like in a timeout. 81421afaf18SBorislav Petkov * rmb() to tell the compiler that system_state 81521afaf18SBorislav Petkov * might have been modified by someone else. 81621afaf18SBorislav Petkov */ 81721afaf18SBorislav Petkov rmb(); 81821afaf18SBorislav Petkov if (atomic_read(&mce_panicked)) 81921afaf18SBorislav Petkov wait_for_panic(); 82021afaf18SBorislav Petkov if (!mca_cfg.monarch_timeout) 82121afaf18SBorislav Petkov goto out; 82221afaf18SBorislav Petkov if ((s64)*t < SPINUNIT) { 82321afaf18SBorislav Petkov if (mca_cfg.tolerant <= 1) 82421afaf18SBorislav Petkov mce_panic(msg, NULL, NULL); 82521afaf18SBorislav Petkov cpu_missing = 1; 82621afaf18SBorislav Petkov return 1; 82721afaf18SBorislav Petkov } 82821afaf18SBorislav Petkov *t -= SPINUNIT; 82921afaf18SBorislav Petkov out: 83021afaf18SBorislav Petkov touch_nmi_watchdog(); 83121afaf18SBorislav Petkov return 0; 83221afaf18SBorislav Petkov } 83321afaf18SBorislav Petkov 83421afaf18SBorislav Petkov /* 83521afaf18SBorislav Petkov * The Monarch's reign. The Monarch is the CPU who entered 83621afaf18SBorislav Petkov * the machine check handler first. It waits for the others to 83721afaf18SBorislav Petkov * raise the exception too and then grades them. When any 83821afaf18SBorislav Petkov * error is fatal panic. Only then let the others continue. 83921afaf18SBorislav Petkov * 84021afaf18SBorislav Petkov * The other CPUs entering the MCE handler will be controlled by the 84121afaf18SBorislav Petkov * Monarch. They are called Subjects. 84221afaf18SBorislav Petkov * 84321afaf18SBorislav Petkov * This way we prevent any potential data corruption in a unrecoverable case 84421afaf18SBorislav Petkov * and also makes sure always all CPU's errors are examined. 84521afaf18SBorislav Petkov * 84621afaf18SBorislav Petkov * Also this detects the case of a machine check event coming from outer 84721afaf18SBorislav Petkov * space (not detected by any CPUs) In this case some external agent wants 84821afaf18SBorislav Petkov * us to shut down, so panic too. 84921afaf18SBorislav Petkov * 85021afaf18SBorislav Petkov * The other CPUs might still decide to panic if the handler happens 85121afaf18SBorislav Petkov * in a unrecoverable place, but in this case the system is in a semi-stable 85221afaf18SBorislav Petkov * state and won't corrupt anything by itself. It's ok to let the others 85321afaf18SBorislav Petkov * continue for a bit first. 85421afaf18SBorislav Petkov * 85521afaf18SBorislav Petkov * All the spin loops have timeouts; when a timeout happens a CPU 85621afaf18SBorislav Petkov * typically elects itself to be Monarch. 85721afaf18SBorislav Petkov */ 85821afaf18SBorislav Petkov static void mce_reign(void) 85921afaf18SBorislav Petkov { 86021afaf18SBorislav Petkov int cpu; 86121afaf18SBorislav Petkov struct mce *m = NULL; 86221afaf18SBorislav Petkov int global_worst = 0; 86321afaf18SBorislav Petkov char *msg = NULL; 86421afaf18SBorislav Petkov char *nmsg = NULL; 86521afaf18SBorislav Petkov 86621afaf18SBorislav Petkov /* 86721afaf18SBorislav Petkov * This CPU is the Monarch and the other CPUs have run 86821afaf18SBorislav Petkov * through their handlers. 86921afaf18SBorislav Petkov * Grade the severity of the errors of all the CPUs. 87021afaf18SBorislav Petkov */ 87121afaf18SBorislav Petkov for_each_possible_cpu(cpu) { 87221afaf18SBorislav Petkov int severity = mce_severity(&per_cpu(mces_seen, cpu), 87321afaf18SBorislav Petkov mca_cfg.tolerant, 87421afaf18SBorislav Petkov &nmsg, true); 87521afaf18SBorislav Petkov if (severity > global_worst) { 87621afaf18SBorislav Petkov msg = nmsg; 87721afaf18SBorislav Petkov global_worst = severity; 87821afaf18SBorislav Petkov m = &per_cpu(mces_seen, cpu); 87921afaf18SBorislav Petkov } 88021afaf18SBorislav Petkov } 88121afaf18SBorislav Petkov 88221afaf18SBorislav Petkov /* 88321afaf18SBorislav Petkov * Cannot recover? Panic here then. 88421afaf18SBorislav Petkov * This dumps all the mces in the log buffer and stops the 88521afaf18SBorislav Petkov * other CPUs. 88621afaf18SBorislav Petkov */ 88721afaf18SBorislav Petkov if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) 88821afaf18SBorislav Petkov mce_panic("Fatal machine check", m, msg); 88921afaf18SBorislav Petkov 89021afaf18SBorislav Petkov /* 89121afaf18SBorislav Petkov * For UC somewhere we let the CPU who detects it handle it. 89221afaf18SBorislav Petkov * Also must let continue the others, otherwise the handling 89321afaf18SBorislav Petkov * CPU could deadlock on a lock. 89421afaf18SBorislav Petkov */ 89521afaf18SBorislav Petkov 89621afaf18SBorislav Petkov /* 89721afaf18SBorislav Petkov * No machine check event found. Must be some external 89821afaf18SBorislav Petkov * source or one CPU is hung. Panic. 89921afaf18SBorislav Petkov */ 90021afaf18SBorislav Petkov if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) 90121afaf18SBorislav Petkov mce_panic("Fatal machine check from unknown source", NULL, NULL); 90221afaf18SBorislav Petkov 90321afaf18SBorislav Petkov /* 90421afaf18SBorislav Petkov * Now clear all the mces_seen so that they don't reappear on 90521afaf18SBorislav Petkov * the next mce. 90621afaf18SBorislav Petkov */ 90721afaf18SBorislav Petkov for_each_possible_cpu(cpu) 90821afaf18SBorislav Petkov memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); 90921afaf18SBorislav Petkov } 91021afaf18SBorislav Petkov 91121afaf18SBorislav Petkov static atomic_t global_nwo; 91221afaf18SBorislav Petkov 91321afaf18SBorislav Petkov /* 91421afaf18SBorislav Petkov * Start of Monarch synchronization. This waits until all CPUs have 91521afaf18SBorislav Petkov * entered the exception handler and then determines if any of them 91621afaf18SBorislav Petkov * saw a fatal event that requires panic. Then it executes them 91721afaf18SBorislav Petkov * in the entry order. 91821afaf18SBorislav Petkov * TBD double check parallel CPU hotunplug 91921afaf18SBorislav Petkov */ 92021afaf18SBorislav Petkov static int mce_start(int *no_way_out) 92121afaf18SBorislav Petkov { 92221afaf18SBorislav Petkov int order; 92321afaf18SBorislav Petkov int cpus = num_online_cpus(); 92421afaf18SBorislav Petkov u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 92521afaf18SBorislav Petkov 92621afaf18SBorislav Petkov if (!timeout) 92721afaf18SBorislav Petkov return -1; 92821afaf18SBorislav Petkov 92921afaf18SBorislav Petkov atomic_add(*no_way_out, &global_nwo); 93021afaf18SBorislav Petkov /* 93121afaf18SBorislav Petkov * Rely on the implied barrier below, such that global_nwo 93221afaf18SBorislav Petkov * is updated before mce_callin. 93321afaf18SBorislav Petkov */ 93421afaf18SBorislav Petkov order = atomic_inc_return(&mce_callin); 93521afaf18SBorislav Petkov 93621afaf18SBorislav Petkov /* 93721afaf18SBorislav Petkov * Wait for everyone. 93821afaf18SBorislav Petkov */ 93921afaf18SBorislav Petkov while (atomic_read(&mce_callin) != cpus) { 94021afaf18SBorislav Petkov if (mce_timed_out(&timeout, 94121afaf18SBorislav Petkov "Timeout: Not all CPUs entered broadcast exception handler")) { 94221afaf18SBorislav Petkov atomic_set(&global_nwo, 0); 94321afaf18SBorislav Petkov return -1; 94421afaf18SBorislav Petkov } 94521afaf18SBorislav Petkov ndelay(SPINUNIT); 94621afaf18SBorislav Petkov } 94721afaf18SBorislav Petkov 94821afaf18SBorislav Petkov /* 94921afaf18SBorislav Petkov * mce_callin should be read before global_nwo 95021afaf18SBorislav Petkov */ 95121afaf18SBorislav Petkov smp_rmb(); 95221afaf18SBorislav Petkov 95321afaf18SBorislav Petkov if (order == 1) { 95421afaf18SBorislav Petkov /* 95521afaf18SBorislav Petkov * Monarch: Starts executing now, the others wait. 95621afaf18SBorislav Petkov */ 95721afaf18SBorislav Petkov atomic_set(&mce_executing, 1); 95821afaf18SBorislav Petkov } else { 95921afaf18SBorislav Petkov /* 96021afaf18SBorislav Petkov * Subject: Now start the scanning loop one by one in 96121afaf18SBorislav Petkov * the original callin order. 96221afaf18SBorislav Petkov * This way when there are any shared banks it will be 96321afaf18SBorislav Petkov * only seen by one CPU before cleared, avoiding duplicates. 96421afaf18SBorislav Petkov */ 96521afaf18SBorislav Petkov while (atomic_read(&mce_executing) < order) { 96621afaf18SBorislav Petkov if (mce_timed_out(&timeout, 96721afaf18SBorislav Petkov "Timeout: Subject CPUs unable to finish machine check processing")) { 96821afaf18SBorislav Petkov atomic_set(&global_nwo, 0); 96921afaf18SBorislav Petkov return -1; 97021afaf18SBorislav Petkov } 97121afaf18SBorislav Petkov ndelay(SPINUNIT); 97221afaf18SBorislav Petkov } 97321afaf18SBorislav Petkov } 97421afaf18SBorislav Petkov 97521afaf18SBorislav Petkov /* 97621afaf18SBorislav Petkov * Cache the global no_way_out state. 97721afaf18SBorislav Petkov */ 97821afaf18SBorislav Petkov *no_way_out = atomic_read(&global_nwo); 97921afaf18SBorislav Petkov 98021afaf18SBorislav Petkov return order; 98121afaf18SBorislav Petkov } 98221afaf18SBorislav Petkov 98321afaf18SBorislav Petkov /* 98421afaf18SBorislav Petkov * Synchronize between CPUs after main scanning loop. 98521afaf18SBorislav Petkov * This invokes the bulk of the Monarch processing. 98621afaf18SBorislav Petkov */ 98721afaf18SBorislav Petkov static int mce_end(int order) 98821afaf18SBorislav Petkov { 98921afaf18SBorislav Petkov int ret = -1; 99021afaf18SBorislav Petkov u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 99121afaf18SBorislav Petkov 99221afaf18SBorislav Petkov if (!timeout) 99321afaf18SBorislav Petkov goto reset; 99421afaf18SBorislav Petkov if (order < 0) 99521afaf18SBorislav Petkov goto reset; 99621afaf18SBorislav Petkov 99721afaf18SBorislav Petkov /* 99821afaf18SBorislav Petkov * Allow others to run. 99921afaf18SBorislav Petkov */ 100021afaf18SBorislav Petkov atomic_inc(&mce_executing); 100121afaf18SBorislav Petkov 100221afaf18SBorislav Petkov if (order == 1) { 100321afaf18SBorislav Petkov /* CHECKME: Can this race with a parallel hotplug? */ 100421afaf18SBorislav Petkov int cpus = num_online_cpus(); 100521afaf18SBorislav Petkov 100621afaf18SBorislav Petkov /* 100721afaf18SBorislav Petkov * Monarch: Wait for everyone to go through their scanning 100821afaf18SBorislav Petkov * loops. 100921afaf18SBorislav Petkov */ 101021afaf18SBorislav Petkov while (atomic_read(&mce_executing) <= cpus) { 101121afaf18SBorislav Petkov if (mce_timed_out(&timeout, 101221afaf18SBorislav Petkov "Timeout: Monarch CPU unable to finish machine check processing")) 101321afaf18SBorislav Petkov goto reset; 101421afaf18SBorislav Petkov ndelay(SPINUNIT); 101521afaf18SBorislav Petkov } 101621afaf18SBorislav Petkov 101721afaf18SBorislav Petkov mce_reign(); 101821afaf18SBorislav Petkov barrier(); 101921afaf18SBorislav Petkov ret = 0; 102021afaf18SBorislav Petkov } else { 102121afaf18SBorislav Petkov /* 102221afaf18SBorislav Petkov * Subject: Wait for Monarch to finish. 102321afaf18SBorislav Petkov */ 102421afaf18SBorislav Petkov while (atomic_read(&mce_executing) != 0) { 102521afaf18SBorislav Petkov if (mce_timed_out(&timeout, 102621afaf18SBorislav Petkov "Timeout: Monarch CPU did not finish machine check processing")) 102721afaf18SBorislav Petkov goto reset; 102821afaf18SBorislav Petkov ndelay(SPINUNIT); 102921afaf18SBorislav Petkov } 103021afaf18SBorislav Petkov 103121afaf18SBorislav Petkov /* 103221afaf18SBorislav Petkov * Don't reset anything. That's done by the Monarch. 103321afaf18SBorislav Petkov */ 103421afaf18SBorislav Petkov return 0; 103521afaf18SBorislav Petkov } 103621afaf18SBorislav Petkov 103721afaf18SBorislav Petkov /* 103821afaf18SBorislav Petkov * Reset all global state. 103921afaf18SBorislav Petkov */ 104021afaf18SBorislav Petkov reset: 104121afaf18SBorislav Petkov atomic_set(&global_nwo, 0); 104221afaf18SBorislav Petkov atomic_set(&mce_callin, 0); 104321afaf18SBorislav Petkov barrier(); 104421afaf18SBorislav Petkov 104521afaf18SBorislav Petkov /* 104621afaf18SBorislav Petkov * Let others run again. 104721afaf18SBorislav Petkov */ 104821afaf18SBorislav Petkov atomic_set(&mce_executing, 0); 104921afaf18SBorislav Petkov return ret; 105021afaf18SBorislav Petkov } 105121afaf18SBorislav Petkov 105221afaf18SBorislav Petkov static void mce_clear_state(unsigned long *toclear) 105321afaf18SBorislav Petkov { 105421afaf18SBorislav Petkov int i; 105521afaf18SBorislav Petkov 105621afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 105721afaf18SBorislav Petkov if (test_bit(i, toclear)) 105821afaf18SBorislav Petkov mce_wrmsrl(msr_ops.status(i), 0); 105921afaf18SBorislav Petkov } 106021afaf18SBorislav Petkov } 106121afaf18SBorislav Petkov 106221afaf18SBorislav Petkov static int do_memory_failure(struct mce *m) 106321afaf18SBorislav Petkov { 106421afaf18SBorislav Petkov int flags = MF_ACTION_REQUIRED; 106521afaf18SBorislav Petkov int ret; 106621afaf18SBorislav Petkov 106721afaf18SBorislav Petkov pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr); 106821afaf18SBorislav Petkov if (!(m->mcgstatus & MCG_STATUS_RIPV)) 106921afaf18SBorislav Petkov flags |= MF_MUST_KILL; 107021afaf18SBorislav Petkov ret = memory_failure(m->addr >> PAGE_SHIFT, flags); 107121afaf18SBorislav Petkov if (ret) 107221afaf18SBorislav Petkov pr_err("Memory error not recovered"); 107321afaf18SBorislav Petkov else 107421afaf18SBorislav Petkov set_mce_nospec(m->addr >> PAGE_SHIFT); 107521afaf18SBorislav Petkov return ret; 107621afaf18SBorislav Petkov } 107721afaf18SBorislav Petkov 107821afaf18SBorislav Petkov 107921afaf18SBorislav Petkov /* 108021afaf18SBorislav Petkov * Cases where we avoid rendezvous handler timeout: 108121afaf18SBorislav Petkov * 1) If this CPU is offline. 108221afaf18SBorislav Petkov * 108321afaf18SBorislav Petkov * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to 108421afaf18SBorislav Petkov * skip those CPUs which remain looping in the 1st kernel - see 108521afaf18SBorislav Petkov * crash_nmi_callback(). 108621afaf18SBorislav Petkov * 108721afaf18SBorislav Petkov * Note: there still is a small window between kexec-ing and the new, 108821afaf18SBorislav Petkov * kdump kernel establishing a new #MC handler where a broadcasted MCE 108921afaf18SBorislav Petkov * might not get handled properly. 109021afaf18SBorislav Petkov */ 109121afaf18SBorislav Petkov static bool __mc_check_crashing_cpu(int cpu) 109221afaf18SBorislav Petkov { 109321afaf18SBorislav Petkov if (cpu_is_offline(cpu) || 109421afaf18SBorislav Petkov (crashing_cpu != -1 && crashing_cpu != cpu)) { 109521afaf18SBorislav Petkov u64 mcgstatus; 109621afaf18SBorislav Petkov 109721afaf18SBorislav Petkov mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); 109821afaf18SBorislav Petkov if (mcgstatus & MCG_STATUS_RIPV) { 109921afaf18SBorislav Petkov mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 110021afaf18SBorislav Petkov return true; 110121afaf18SBorislav Petkov } 110221afaf18SBorislav Petkov } 110321afaf18SBorislav Petkov return false; 110421afaf18SBorislav Petkov } 110521afaf18SBorislav Petkov 110621afaf18SBorislav Petkov static void __mc_scan_banks(struct mce *m, struct mce *final, 110721afaf18SBorislav Petkov unsigned long *toclear, unsigned long *valid_banks, 110821afaf18SBorislav Petkov int no_way_out, int *worst) 110921afaf18SBorislav Petkov { 111021afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 111121afaf18SBorislav Petkov int severity, i; 111221afaf18SBorislav Petkov 111321afaf18SBorislav Petkov for (i = 0; i < cfg->banks; i++) { 111421afaf18SBorislav Petkov __clear_bit(i, toclear); 111521afaf18SBorislav Petkov if (!test_bit(i, valid_banks)) 111621afaf18SBorislav Petkov continue; 111721afaf18SBorislav Petkov 111821afaf18SBorislav Petkov if (!mce_banks[i].ctl) 111921afaf18SBorislav Petkov continue; 112021afaf18SBorislav Petkov 112121afaf18SBorislav Petkov m->misc = 0; 112221afaf18SBorislav Petkov m->addr = 0; 112321afaf18SBorislav Petkov m->bank = i; 112421afaf18SBorislav Petkov 112521afaf18SBorislav Petkov m->status = mce_rdmsrl(msr_ops.status(i)); 112621afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_VAL)) 112721afaf18SBorislav Petkov continue; 112821afaf18SBorislav Petkov 112921afaf18SBorislav Petkov /* 113021afaf18SBorislav Petkov * Corrected or non-signaled errors are handled by 113121afaf18SBorislav Petkov * machine_check_poll(). Leave them alone, unless this panics. 113221afaf18SBorislav Petkov */ 113321afaf18SBorislav Petkov if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && 113421afaf18SBorislav Petkov !no_way_out) 113521afaf18SBorislav Petkov continue; 113621afaf18SBorislav Petkov 113721afaf18SBorislav Petkov /* Set taint even when machine check was not enabled. */ 113821afaf18SBorislav Petkov add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 113921afaf18SBorislav Petkov 114021afaf18SBorislav Petkov severity = mce_severity(m, cfg->tolerant, NULL, true); 114121afaf18SBorislav Petkov 114221afaf18SBorislav Petkov /* 114321afaf18SBorislav Petkov * When machine check was for corrected/deferred handler don't 114421afaf18SBorislav Petkov * touch, unless we're panicking. 114521afaf18SBorislav Petkov */ 114621afaf18SBorislav Petkov if ((severity == MCE_KEEP_SEVERITY || 114721afaf18SBorislav Petkov severity == MCE_UCNA_SEVERITY) && !no_way_out) 114821afaf18SBorislav Petkov continue; 114921afaf18SBorislav Petkov 115021afaf18SBorislav Petkov __set_bit(i, toclear); 115121afaf18SBorislav Petkov 115221afaf18SBorislav Petkov /* Machine check event was not enabled. Clear, but ignore. */ 115321afaf18SBorislav Petkov if (severity == MCE_NO_SEVERITY) 115421afaf18SBorislav Petkov continue; 115521afaf18SBorislav Petkov 115621afaf18SBorislav Petkov mce_read_aux(m, i); 115721afaf18SBorislav Petkov 115821afaf18SBorislav Petkov /* assuming valid severity level != 0 */ 115921afaf18SBorislav Petkov m->severity = severity; 116021afaf18SBorislav Petkov 116121afaf18SBorislav Petkov mce_log(m); 116221afaf18SBorislav Petkov 116321afaf18SBorislav Petkov if (severity > *worst) { 116421afaf18SBorislav Petkov *final = *m; 116521afaf18SBorislav Petkov *worst = severity; 116621afaf18SBorislav Petkov } 116721afaf18SBorislav Petkov } 116821afaf18SBorislav Petkov 116921afaf18SBorislav Petkov /* mce_clear_state will clear *final, save locally for use later */ 117021afaf18SBorislav Petkov *m = *final; 117121afaf18SBorislav Petkov } 117221afaf18SBorislav Petkov 117321afaf18SBorislav Petkov /* 117421afaf18SBorislav Petkov * The actual machine check handler. This only handles real 117521afaf18SBorislav Petkov * exceptions when something got corrupted coming in through int 18. 117621afaf18SBorislav Petkov * 117721afaf18SBorislav Petkov * This is executed in NMI context not subject to normal locking rules. This 117821afaf18SBorislav Petkov * implies that most kernel services cannot be safely used. Don't even 117921afaf18SBorislav Petkov * think about putting a printk in there! 118021afaf18SBorislav Petkov * 118121afaf18SBorislav Petkov * On Intel systems this is entered on all CPUs in parallel through 118221afaf18SBorislav Petkov * MCE broadcast. However some CPUs might be broken beyond repair, 118321afaf18SBorislav Petkov * so be always careful when synchronizing with others. 118421afaf18SBorislav Petkov */ 118521afaf18SBorislav Petkov void do_machine_check(struct pt_regs *regs, long error_code) 118621afaf18SBorislav Petkov { 118721afaf18SBorislav Petkov DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); 118821afaf18SBorislav Petkov DECLARE_BITMAP(toclear, MAX_NR_BANKS); 118921afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 119021afaf18SBorislav Petkov int cpu = smp_processor_id(); 119121afaf18SBorislav Petkov char *msg = "Unknown"; 119221afaf18SBorislav Petkov struct mce m, *final; 119321afaf18SBorislav Petkov int worst = 0; 119421afaf18SBorislav Petkov 119521afaf18SBorislav Petkov /* 119621afaf18SBorislav Petkov * Establish sequential order between the CPUs entering the machine 119721afaf18SBorislav Petkov * check handler. 119821afaf18SBorislav Petkov */ 119921afaf18SBorislav Petkov int order = -1; 120021afaf18SBorislav Petkov 120121afaf18SBorislav Petkov /* 120221afaf18SBorislav Petkov * If no_way_out gets set, there is no safe way to recover from this 120321afaf18SBorislav Petkov * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. 120421afaf18SBorislav Petkov */ 120521afaf18SBorislav Petkov int no_way_out = 0; 120621afaf18SBorislav Petkov 120721afaf18SBorislav Petkov /* 120821afaf18SBorislav Petkov * If kill_it gets set, there might be a way to recover from this 120921afaf18SBorislav Petkov * error. 121021afaf18SBorislav Petkov */ 121121afaf18SBorislav Petkov int kill_it = 0; 121221afaf18SBorislav Petkov 121321afaf18SBorislav Petkov /* 121421afaf18SBorislav Petkov * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES 121521afaf18SBorislav Petkov * on Intel. 121621afaf18SBorislav Petkov */ 121721afaf18SBorislav Petkov int lmce = 1; 121821afaf18SBorislav Petkov 121921afaf18SBorislav Petkov if (__mc_check_crashing_cpu(cpu)) 122021afaf18SBorislav Petkov return; 122121afaf18SBorislav Petkov 122221afaf18SBorislav Petkov ist_enter(regs); 122321afaf18SBorislav Petkov 122421afaf18SBorislav Petkov this_cpu_inc(mce_exception_count); 122521afaf18SBorislav Petkov 122621afaf18SBorislav Petkov mce_gather_info(&m, regs); 122721afaf18SBorislav Petkov m.tsc = rdtsc(); 122821afaf18SBorislav Petkov 122921afaf18SBorislav Petkov final = this_cpu_ptr(&mces_seen); 123021afaf18SBorislav Petkov *final = m; 123121afaf18SBorislav Petkov 123221afaf18SBorislav Petkov memset(valid_banks, 0, sizeof(valid_banks)); 123321afaf18SBorislav Petkov no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); 123421afaf18SBorislav Petkov 123521afaf18SBorislav Petkov barrier(); 123621afaf18SBorislav Petkov 123721afaf18SBorislav Petkov /* 123821afaf18SBorislav Petkov * When no restart IP might need to kill or panic. 123921afaf18SBorislav Petkov * Assume the worst for now, but if we find the 124021afaf18SBorislav Petkov * severity is MCE_AR_SEVERITY we have other options. 124121afaf18SBorislav Petkov */ 124221afaf18SBorislav Petkov if (!(m.mcgstatus & MCG_STATUS_RIPV)) 124321afaf18SBorislav Petkov kill_it = 1; 124421afaf18SBorislav Petkov 124521afaf18SBorislav Petkov /* 124621afaf18SBorislav Petkov * Check if this MCE is signaled to only this logical processor, 124721afaf18SBorislav Petkov * on Intel only. 124821afaf18SBorislav Petkov */ 124921afaf18SBorislav Petkov if (m.cpuvendor == X86_VENDOR_INTEL) 125021afaf18SBorislav Petkov lmce = m.mcgstatus & MCG_STATUS_LMCES; 125121afaf18SBorislav Petkov 125221afaf18SBorislav Petkov /* 125321afaf18SBorislav Petkov * Local machine check may already know that we have to panic. 125421afaf18SBorislav Petkov * Broadcast machine check begins rendezvous in mce_start() 125521afaf18SBorislav Petkov * Go through all banks in exclusion of the other CPUs. This way we 125621afaf18SBorislav Petkov * don't report duplicated events on shared banks because the first one 125721afaf18SBorislav Petkov * to see it will clear it. 125821afaf18SBorislav Petkov */ 125921afaf18SBorislav Petkov if (lmce) { 126021afaf18SBorislav Petkov if (no_way_out) 126121afaf18SBorislav Petkov mce_panic("Fatal local machine check", &m, msg); 126221afaf18SBorislav Petkov } else { 126321afaf18SBorislav Petkov order = mce_start(&no_way_out); 126421afaf18SBorislav Petkov } 126521afaf18SBorislav Petkov 126621afaf18SBorislav Petkov __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst); 126721afaf18SBorislav Petkov 126821afaf18SBorislav Petkov if (!no_way_out) 126921afaf18SBorislav Petkov mce_clear_state(toclear); 127021afaf18SBorislav Petkov 127121afaf18SBorislav Petkov /* 127221afaf18SBorislav Petkov * Do most of the synchronization with other CPUs. 127321afaf18SBorislav Petkov * When there's any problem use only local no_way_out state. 127421afaf18SBorislav Petkov */ 127521afaf18SBorislav Petkov if (!lmce) { 127621afaf18SBorislav Petkov if (mce_end(order) < 0) 127721afaf18SBorislav Petkov no_way_out = worst >= MCE_PANIC_SEVERITY; 127821afaf18SBorislav Petkov } else { 127921afaf18SBorislav Petkov /* 128021afaf18SBorislav Petkov * If there was a fatal machine check we should have 128121afaf18SBorislav Petkov * already called mce_panic earlier in this function. 128221afaf18SBorislav Petkov * Since we re-read the banks, we might have found 128321afaf18SBorislav Petkov * something new. Check again to see if we found a 128421afaf18SBorislav Petkov * fatal error. We call "mce_severity()" again to 128521afaf18SBorislav Petkov * make sure we have the right "msg". 128621afaf18SBorislav Petkov */ 128721afaf18SBorislav Petkov if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { 128821afaf18SBorislav Petkov mce_severity(&m, cfg->tolerant, &msg, true); 128921afaf18SBorislav Petkov mce_panic("Local fatal machine check!", &m, msg); 129021afaf18SBorislav Petkov } 129121afaf18SBorislav Petkov } 129221afaf18SBorislav Petkov 129321afaf18SBorislav Petkov /* 129421afaf18SBorislav Petkov * If tolerant is at an insane level we drop requests to kill 129521afaf18SBorislav Petkov * processes and continue even when there is no way out. 129621afaf18SBorislav Petkov */ 129721afaf18SBorislav Petkov if (cfg->tolerant == 3) 129821afaf18SBorislav Petkov kill_it = 0; 129921afaf18SBorislav Petkov else if (no_way_out) 130021afaf18SBorislav Petkov mce_panic("Fatal machine check on current CPU", &m, msg); 130121afaf18SBorislav Petkov 130221afaf18SBorislav Petkov if (worst > 0) 130321afaf18SBorislav Petkov mce_report_event(regs); 130421afaf18SBorislav Petkov mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 130521afaf18SBorislav Petkov 130621afaf18SBorislav Petkov sync_core(); 130721afaf18SBorislav Petkov 130821afaf18SBorislav Petkov if (worst != MCE_AR_SEVERITY && !kill_it) 130921afaf18SBorislav Petkov goto out_ist; 131021afaf18SBorislav Petkov 131121afaf18SBorislav Petkov /* Fault was in user mode and we need to take some action */ 131221afaf18SBorislav Petkov if ((m.cs & 3) == 3) { 131321afaf18SBorislav Petkov ist_begin_non_atomic(regs); 131421afaf18SBorislav Petkov local_irq_enable(); 131521afaf18SBorislav Petkov 131621afaf18SBorislav Petkov if (kill_it || do_memory_failure(&m)) 131721afaf18SBorislav Petkov force_sig(SIGBUS, current); 131821afaf18SBorislav Petkov local_irq_disable(); 131921afaf18SBorislav Petkov ist_end_non_atomic(); 132021afaf18SBorislav Petkov } else { 132121afaf18SBorislav Petkov if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0)) 132221afaf18SBorislav Petkov mce_panic("Failed kernel mode recovery", &m, NULL); 132321afaf18SBorislav Petkov } 132421afaf18SBorislav Petkov 132521afaf18SBorislav Petkov out_ist: 132621afaf18SBorislav Petkov ist_exit(regs); 132721afaf18SBorislav Petkov } 132821afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(do_machine_check); 132921afaf18SBorislav Petkov 133021afaf18SBorislav Petkov #ifndef CONFIG_MEMORY_FAILURE 133121afaf18SBorislav Petkov int memory_failure(unsigned long pfn, int flags) 133221afaf18SBorislav Petkov { 133321afaf18SBorislav Petkov /* mce_severity() should not hand us an ACTION_REQUIRED error */ 133421afaf18SBorislav Petkov BUG_ON(flags & MF_ACTION_REQUIRED); 133521afaf18SBorislav Petkov pr_err("Uncorrected memory error in page 0x%lx ignored\n" 133621afaf18SBorislav Petkov "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", 133721afaf18SBorislav Petkov pfn); 133821afaf18SBorislav Petkov 133921afaf18SBorislav Petkov return 0; 134021afaf18SBorislav Petkov } 134121afaf18SBorislav Petkov #endif 134221afaf18SBorislav Petkov 134321afaf18SBorislav Petkov /* 134421afaf18SBorislav Petkov * Periodic polling timer for "silent" machine check errors. If the 134521afaf18SBorislav Petkov * poller finds an MCE, poll 2x faster. When the poller finds no more 134621afaf18SBorislav Petkov * errors, poll 2x slower (up to check_interval seconds). 134721afaf18SBorislav Petkov */ 134821afaf18SBorislav Petkov static unsigned long check_interval = INITIAL_CHECK_INTERVAL; 134921afaf18SBorislav Petkov 135021afaf18SBorislav Petkov static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ 135121afaf18SBorislav Petkov static DEFINE_PER_CPU(struct timer_list, mce_timer); 135221afaf18SBorislav Petkov 135321afaf18SBorislav Petkov static unsigned long mce_adjust_timer_default(unsigned long interval) 135421afaf18SBorislav Petkov { 135521afaf18SBorislav Petkov return interval; 135621afaf18SBorislav Petkov } 135721afaf18SBorislav Petkov 135821afaf18SBorislav Petkov static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 135921afaf18SBorislav Petkov 136021afaf18SBorislav Petkov static void __start_timer(struct timer_list *t, unsigned long interval) 136121afaf18SBorislav Petkov { 136221afaf18SBorislav Petkov unsigned long when = jiffies + interval; 136321afaf18SBorislav Petkov unsigned long flags; 136421afaf18SBorislav Petkov 136521afaf18SBorislav Petkov local_irq_save(flags); 136621afaf18SBorislav Petkov 136721afaf18SBorislav Petkov if (!timer_pending(t) || time_before(when, t->expires)) 136821afaf18SBorislav Petkov mod_timer(t, round_jiffies(when)); 136921afaf18SBorislav Petkov 137021afaf18SBorislav Petkov local_irq_restore(flags); 137121afaf18SBorislav Petkov } 137221afaf18SBorislav Petkov 137321afaf18SBorislav Petkov static void mce_timer_fn(struct timer_list *t) 137421afaf18SBorislav Petkov { 137521afaf18SBorislav Petkov struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); 137621afaf18SBorislav Petkov unsigned long iv; 137721afaf18SBorislav Petkov 137821afaf18SBorislav Petkov WARN_ON(cpu_t != t); 137921afaf18SBorislav Petkov 138021afaf18SBorislav Petkov iv = __this_cpu_read(mce_next_interval); 138121afaf18SBorislav Petkov 138221afaf18SBorislav Petkov if (mce_available(this_cpu_ptr(&cpu_info))) { 138321afaf18SBorislav Petkov machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); 138421afaf18SBorislav Petkov 138521afaf18SBorislav Petkov if (mce_intel_cmci_poll()) { 138621afaf18SBorislav Petkov iv = mce_adjust_timer(iv); 138721afaf18SBorislav Petkov goto done; 138821afaf18SBorislav Petkov } 138921afaf18SBorislav Petkov } 139021afaf18SBorislav Petkov 139121afaf18SBorislav Petkov /* 139221afaf18SBorislav Petkov * Alert userspace if needed. If we logged an MCE, reduce the polling 139321afaf18SBorislav Petkov * interval, otherwise increase the polling interval. 139421afaf18SBorislav Petkov */ 139521afaf18SBorislav Petkov if (mce_notify_irq()) 139621afaf18SBorislav Petkov iv = max(iv / 2, (unsigned long) HZ/100); 139721afaf18SBorislav Petkov else 139821afaf18SBorislav Petkov iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); 139921afaf18SBorislav Petkov 140021afaf18SBorislav Petkov done: 140121afaf18SBorislav Petkov __this_cpu_write(mce_next_interval, iv); 140221afaf18SBorislav Petkov __start_timer(t, iv); 140321afaf18SBorislav Petkov } 140421afaf18SBorislav Petkov 140521afaf18SBorislav Petkov /* 140621afaf18SBorislav Petkov * Ensure that the timer is firing in @interval from now. 140721afaf18SBorislav Petkov */ 140821afaf18SBorislav Petkov void mce_timer_kick(unsigned long interval) 140921afaf18SBorislav Petkov { 141021afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 141121afaf18SBorislav Petkov unsigned long iv = __this_cpu_read(mce_next_interval); 141221afaf18SBorislav Petkov 141321afaf18SBorislav Petkov __start_timer(t, interval); 141421afaf18SBorislav Petkov 141521afaf18SBorislav Petkov if (interval < iv) 141621afaf18SBorislav Petkov __this_cpu_write(mce_next_interval, interval); 141721afaf18SBorislav Petkov } 141821afaf18SBorislav Petkov 141921afaf18SBorislav Petkov /* Must not be called in IRQ context where del_timer_sync() can deadlock */ 142021afaf18SBorislav Petkov static void mce_timer_delete_all(void) 142121afaf18SBorislav Petkov { 142221afaf18SBorislav Petkov int cpu; 142321afaf18SBorislav Petkov 142421afaf18SBorislav Petkov for_each_online_cpu(cpu) 142521afaf18SBorislav Petkov del_timer_sync(&per_cpu(mce_timer, cpu)); 142621afaf18SBorislav Petkov } 142721afaf18SBorislav Petkov 142821afaf18SBorislav Petkov /* 142921afaf18SBorislav Petkov * Notify the user(s) about new machine check events. 143021afaf18SBorislav Petkov * Can be called from interrupt context, but not from machine check/NMI 143121afaf18SBorislav Petkov * context. 143221afaf18SBorislav Petkov */ 143321afaf18SBorislav Petkov int mce_notify_irq(void) 143421afaf18SBorislav Petkov { 143521afaf18SBorislav Petkov /* Not more than two messages every minute */ 143621afaf18SBorislav Petkov static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); 143721afaf18SBorislav Petkov 143821afaf18SBorislav Petkov if (test_and_clear_bit(0, &mce_need_notify)) { 143921afaf18SBorislav Petkov mce_work_trigger(); 144021afaf18SBorislav Petkov 144121afaf18SBorislav Petkov if (__ratelimit(&ratelimit)) 144221afaf18SBorislav Petkov pr_info(HW_ERR "Machine check events logged\n"); 144321afaf18SBorislav Petkov 144421afaf18SBorislav Petkov return 1; 144521afaf18SBorislav Petkov } 144621afaf18SBorislav Petkov return 0; 144721afaf18SBorislav Petkov } 144821afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_notify_irq); 144921afaf18SBorislav Petkov 145021afaf18SBorislav Petkov static int __mcheck_cpu_mce_banks_init(void) 145121afaf18SBorislav Petkov { 145221afaf18SBorislav Petkov int i; 145321afaf18SBorislav Petkov u8 num_banks = mca_cfg.banks; 145421afaf18SBorislav Petkov 145521afaf18SBorislav Petkov mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL); 145621afaf18SBorislav Petkov if (!mce_banks) 145721afaf18SBorislav Petkov return -ENOMEM; 145821afaf18SBorislav Petkov 145921afaf18SBorislav Petkov for (i = 0; i < num_banks; i++) { 146021afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 146121afaf18SBorislav Petkov 146221afaf18SBorislav Petkov b->ctl = -1ULL; 146321afaf18SBorislav Petkov b->init = 1; 146421afaf18SBorislav Petkov } 146521afaf18SBorislav Petkov return 0; 146621afaf18SBorislav Petkov } 146721afaf18SBorislav Petkov 146821afaf18SBorislav Petkov /* 146921afaf18SBorislav Petkov * Initialize Machine Checks for a CPU. 147021afaf18SBorislav Petkov */ 147121afaf18SBorislav Petkov static int __mcheck_cpu_cap_init(void) 147221afaf18SBorislav Petkov { 147321afaf18SBorislav Petkov unsigned b; 147421afaf18SBorislav Petkov u64 cap; 147521afaf18SBorislav Petkov 147621afaf18SBorislav Petkov rdmsrl(MSR_IA32_MCG_CAP, cap); 147721afaf18SBorislav Petkov 147821afaf18SBorislav Petkov b = cap & MCG_BANKCNT_MASK; 147921afaf18SBorislav Petkov if (!mca_cfg.banks) 148021afaf18SBorislav Petkov pr_info("CPU supports %d MCE banks\n", b); 148121afaf18SBorislav Petkov 148221afaf18SBorislav Petkov if (b > MAX_NR_BANKS) { 148321afaf18SBorislav Petkov pr_warn("Using only %u machine check banks out of %u\n", 148421afaf18SBorislav Petkov MAX_NR_BANKS, b); 148521afaf18SBorislav Petkov b = MAX_NR_BANKS; 148621afaf18SBorislav Petkov } 148721afaf18SBorislav Petkov 148821afaf18SBorislav Petkov /* Don't support asymmetric configurations today */ 148921afaf18SBorislav Petkov WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks); 149021afaf18SBorislav Petkov mca_cfg.banks = b; 149121afaf18SBorislav Petkov 149221afaf18SBorislav Petkov if (!mce_banks) { 149321afaf18SBorislav Petkov int err = __mcheck_cpu_mce_banks_init(); 149421afaf18SBorislav Petkov 149521afaf18SBorislav Petkov if (err) 149621afaf18SBorislav Petkov return err; 149721afaf18SBorislav Petkov } 149821afaf18SBorislav Petkov 149921afaf18SBorislav Petkov /* Use accurate RIP reporting if available. */ 150021afaf18SBorislav Petkov if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) 150121afaf18SBorislav Petkov mca_cfg.rip_msr = MSR_IA32_MCG_EIP; 150221afaf18SBorislav Petkov 150321afaf18SBorislav Petkov if (cap & MCG_SER_P) 150421afaf18SBorislav Petkov mca_cfg.ser = 1; 150521afaf18SBorislav Petkov 150621afaf18SBorislav Petkov return 0; 150721afaf18SBorislav Petkov } 150821afaf18SBorislav Petkov 150921afaf18SBorislav Petkov static void __mcheck_cpu_init_generic(void) 151021afaf18SBorislav Petkov { 151121afaf18SBorislav Petkov enum mcp_flags m_fl = 0; 151221afaf18SBorislav Petkov mce_banks_t all_banks; 151321afaf18SBorislav Petkov u64 cap; 151421afaf18SBorislav Petkov 151521afaf18SBorislav Petkov if (!mca_cfg.bootlog) 151621afaf18SBorislav Petkov m_fl = MCP_DONTLOG; 151721afaf18SBorislav Petkov 151821afaf18SBorislav Petkov /* 151921afaf18SBorislav Petkov * Log the machine checks left over from the previous reset. 152021afaf18SBorislav Petkov */ 152121afaf18SBorislav Petkov bitmap_fill(all_banks, MAX_NR_BANKS); 152221afaf18SBorislav Petkov machine_check_poll(MCP_UC | m_fl, &all_banks); 152321afaf18SBorislav Petkov 152421afaf18SBorislav Petkov cr4_set_bits(X86_CR4_MCE); 152521afaf18SBorislav Petkov 152621afaf18SBorislav Petkov rdmsrl(MSR_IA32_MCG_CAP, cap); 152721afaf18SBorislav Petkov if (cap & MCG_CTL_P) 152821afaf18SBorislav Petkov wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 152921afaf18SBorislav Petkov } 153021afaf18SBorislav Petkov 153121afaf18SBorislav Petkov static void __mcheck_cpu_init_clear_banks(void) 153221afaf18SBorislav Petkov { 153321afaf18SBorislav Petkov int i; 153421afaf18SBorislav Petkov 153521afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 153621afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 153721afaf18SBorislav Petkov 153821afaf18SBorislav Petkov if (!b->init) 153921afaf18SBorislav Petkov continue; 154021afaf18SBorislav Petkov wrmsrl(msr_ops.ctl(i), b->ctl); 154121afaf18SBorislav Petkov wrmsrl(msr_ops.status(i), 0); 154221afaf18SBorislav Petkov } 154321afaf18SBorislav Petkov } 154421afaf18SBorislav Petkov 154521afaf18SBorislav Petkov /* 154621afaf18SBorislav Petkov * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and 154721afaf18SBorislav Petkov * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM 154821afaf18SBorislav Petkov * Vol 3B Table 15-20). But this confuses both the code that determines 154921afaf18SBorislav Petkov * whether the machine check occurred in kernel or user mode, and also 155021afaf18SBorislav Petkov * the severity assessment code. Pretend that EIPV was set, and take the 155121afaf18SBorislav Petkov * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. 155221afaf18SBorislav Petkov */ 155321afaf18SBorislav Petkov static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) 155421afaf18SBorislav Petkov { 155521afaf18SBorislav Petkov if (bank != 0) 155621afaf18SBorislav Petkov return; 155721afaf18SBorislav Petkov if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) 155821afaf18SBorislav Petkov return; 155921afaf18SBorislav Petkov if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| 156021afaf18SBorislav Petkov MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| 156121afaf18SBorislav Petkov MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| 156221afaf18SBorislav Petkov MCACOD)) != 156321afaf18SBorislav Petkov (MCI_STATUS_UC|MCI_STATUS_EN| 156421afaf18SBorislav Petkov MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| 156521afaf18SBorislav Petkov MCI_STATUS_AR|MCACOD_INSTR)) 156621afaf18SBorislav Petkov return; 156721afaf18SBorislav Petkov 156821afaf18SBorislav Petkov m->mcgstatus |= MCG_STATUS_EIPV; 156921afaf18SBorislav Petkov m->ip = regs->ip; 157021afaf18SBorislav Petkov m->cs = regs->cs; 157121afaf18SBorislav Petkov } 157221afaf18SBorislav Petkov 157321afaf18SBorislav Petkov /* Add per CPU specific workarounds here */ 157421afaf18SBorislav Petkov static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 157521afaf18SBorislav Petkov { 157621afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 157721afaf18SBorislav Petkov 157821afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 157921afaf18SBorislav Petkov pr_info("unknown CPU type - not enabling MCE support\n"); 158021afaf18SBorislav Petkov return -EOPNOTSUPP; 158121afaf18SBorislav Petkov } 158221afaf18SBorislav Petkov 158321afaf18SBorislav Petkov /* This should be disabled by the BIOS, but isn't always */ 158421afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_AMD) { 158521afaf18SBorislav Petkov if (c->x86 == 15 && cfg->banks > 4) { 158621afaf18SBorislav Petkov /* 158721afaf18SBorislav Petkov * disable GART TBL walk error reporting, which 158821afaf18SBorislav Petkov * trips off incorrectly with the IOMMU & 3ware 158921afaf18SBorislav Petkov * & Cerberus: 159021afaf18SBorislav Petkov */ 159121afaf18SBorislav Petkov clear_bit(10, (unsigned long *)&mce_banks[4].ctl); 159221afaf18SBorislav Petkov } 159321afaf18SBorislav Petkov if (c->x86 < 0x11 && cfg->bootlog < 0) { 159421afaf18SBorislav Petkov /* 159521afaf18SBorislav Petkov * Lots of broken BIOS around that don't clear them 159621afaf18SBorislav Petkov * by default and leave crap in there. Don't log: 159721afaf18SBorislav Petkov */ 159821afaf18SBorislav Petkov cfg->bootlog = 0; 159921afaf18SBorislav Petkov } 160021afaf18SBorislav Petkov /* 160121afaf18SBorislav Petkov * Various K7s with broken bank 0 around. Always disable 160221afaf18SBorislav Petkov * by default. 160321afaf18SBorislav Petkov */ 160421afaf18SBorislav Petkov if (c->x86 == 6 && cfg->banks > 0) 160521afaf18SBorislav Petkov mce_banks[0].ctl = 0; 160621afaf18SBorislav Petkov 160721afaf18SBorislav Petkov /* 160821afaf18SBorislav Petkov * overflow_recov is supported for F15h Models 00h-0fh 160921afaf18SBorislav Petkov * even though we don't have a CPUID bit for it. 161021afaf18SBorislav Petkov */ 161121afaf18SBorislav Petkov if (c->x86 == 0x15 && c->x86_model <= 0xf) 161221afaf18SBorislav Petkov mce_flags.overflow_recov = 1; 161321afaf18SBorislav Petkov 161421afaf18SBorislav Petkov /* 161521afaf18SBorislav Petkov * Turn off MC4_MISC thresholding banks on those models since 161621afaf18SBorislav Petkov * they're not supported there. 161721afaf18SBorislav Petkov */ 161821afaf18SBorislav Petkov if (c->x86 == 0x15 && 161921afaf18SBorislav Petkov (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { 162021afaf18SBorislav Petkov int i; 162121afaf18SBorislav Petkov u64 hwcr; 162221afaf18SBorislav Petkov bool need_toggle; 162321afaf18SBorislav Petkov u32 msrs[] = { 162421afaf18SBorislav Petkov 0x00000413, /* MC4_MISC0 */ 162521afaf18SBorislav Petkov 0xc0000408, /* MC4_MISC1 */ 162621afaf18SBorislav Petkov }; 162721afaf18SBorislav Petkov 162821afaf18SBorislav Petkov rdmsrl(MSR_K7_HWCR, hwcr); 162921afaf18SBorislav Petkov 163021afaf18SBorislav Petkov /* McStatusWrEn has to be set */ 163121afaf18SBorislav Petkov need_toggle = !(hwcr & BIT(18)); 163221afaf18SBorislav Petkov 163321afaf18SBorislav Petkov if (need_toggle) 163421afaf18SBorislav Petkov wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); 163521afaf18SBorislav Petkov 163621afaf18SBorislav Petkov /* Clear CntP bit safely */ 163721afaf18SBorislav Petkov for (i = 0; i < ARRAY_SIZE(msrs); i++) 163821afaf18SBorislav Petkov msr_clear_bit(msrs[i], 62); 163921afaf18SBorislav Petkov 164021afaf18SBorislav Petkov /* restore old settings */ 164121afaf18SBorislav Petkov if (need_toggle) 164221afaf18SBorislav Petkov wrmsrl(MSR_K7_HWCR, hwcr); 164321afaf18SBorislav Petkov } 164421afaf18SBorislav Petkov } 164521afaf18SBorislav Petkov 164621afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_INTEL) { 164721afaf18SBorislav Petkov /* 164821afaf18SBorislav Petkov * SDM documents that on family 6 bank 0 should not be written 164921afaf18SBorislav Petkov * because it aliases to another special BIOS controlled 165021afaf18SBorislav Petkov * register. 165121afaf18SBorislav Petkov * But it's not aliased anymore on model 0x1a+ 165221afaf18SBorislav Petkov * Don't ignore bank 0 completely because there could be a 165321afaf18SBorislav Petkov * valid event later, merely don't write CTL0. 165421afaf18SBorislav Petkov */ 165521afaf18SBorislav Petkov 165621afaf18SBorislav Petkov if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0) 165721afaf18SBorislav Petkov mce_banks[0].init = 0; 165821afaf18SBorislav Petkov 165921afaf18SBorislav Petkov /* 166021afaf18SBorislav Petkov * All newer Intel systems support MCE broadcasting. Enable 166121afaf18SBorislav Petkov * synchronization with a one second timeout. 166221afaf18SBorislav Petkov */ 166321afaf18SBorislav Petkov if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && 166421afaf18SBorislav Petkov cfg->monarch_timeout < 0) 166521afaf18SBorislav Petkov cfg->monarch_timeout = USEC_PER_SEC; 166621afaf18SBorislav Petkov 166721afaf18SBorislav Petkov /* 166821afaf18SBorislav Petkov * There are also broken BIOSes on some Pentium M and 166921afaf18SBorislav Petkov * earlier systems: 167021afaf18SBorislav Petkov */ 167121afaf18SBorislav Petkov if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) 167221afaf18SBorislav Petkov cfg->bootlog = 0; 167321afaf18SBorislav Petkov 167421afaf18SBorislav Petkov if (c->x86 == 6 && c->x86_model == 45) 167521afaf18SBorislav Petkov quirk_no_way_out = quirk_sandybridge_ifu; 167621afaf18SBorislav Petkov } 167721afaf18SBorislav Petkov if (cfg->monarch_timeout < 0) 167821afaf18SBorislav Petkov cfg->monarch_timeout = 0; 167921afaf18SBorislav Petkov if (cfg->bootlog != 0) 168021afaf18SBorislav Petkov cfg->panic_timeout = 30; 168121afaf18SBorislav Petkov 168221afaf18SBorislav Petkov return 0; 168321afaf18SBorislav Petkov } 168421afaf18SBorislav Petkov 168521afaf18SBorislav Petkov static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) 168621afaf18SBorislav Petkov { 168721afaf18SBorislav Petkov if (c->x86 != 5) 168821afaf18SBorislav Petkov return 0; 168921afaf18SBorislav Petkov 169021afaf18SBorislav Petkov switch (c->x86_vendor) { 169121afaf18SBorislav Petkov case X86_VENDOR_INTEL: 169221afaf18SBorislav Petkov intel_p5_mcheck_init(c); 169321afaf18SBorislav Petkov return 1; 169421afaf18SBorislav Petkov break; 169521afaf18SBorislav Petkov case X86_VENDOR_CENTAUR: 169621afaf18SBorislav Petkov winchip_mcheck_init(c); 169721afaf18SBorislav Petkov return 1; 169821afaf18SBorislav Petkov break; 169921afaf18SBorislav Petkov default: 170021afaf18SBorislav Petkov return 0; 170121afaf18SBorislav Petkov } 170221afaf18SBorislav Petkov 170321afaf18SBorislav Petkov return 0; 170421afaf18SBorislav Petkov } 170521afaf18SBorislav Petkov 170621afaf18SBorislav Petkov /* 170721afaf18SBorislav Petkov * Init basic CPU features needed for early decoding of MCEs. 170821afaf18SBorislav Petkov */ 170921afaf18SBorislav Petkov static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) 171021afaf18SBorislav Petkov { 171121afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { 171221afaf18SBorislav Petkov mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); 171321afaf18SBorislav Petkov mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); 171421afaf18SBorislav Petkov mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); 171521afaf18SBorislav Petkov 171621afaf18SBorislav Petkov if (mce_flags.smca) { 171721afaf18SBorislav Petkov msr_ops.ctl = smca_ctl_reg; 171821afaf18SBorislav Petkov msr_ops.status = smca_status_reg; 171921afaf18SBorislav Petkov msr_ops.addr = smca_addr_reg; 172021afaf18SBorislav Petkov msr_ops.misc = smca_misc_reg; 172121afaf18SBorislav Petkov } 172221afaf18SBorislav Petkov } 172321afaf18SBorislav Petkov } 172421afaf18SBorislav Petkov 172521afaf18SBorislav Petkov static void mce_centaur_feature_init(struct cpuinfo_x86 *c) 172621afaf18SBorislav Petkov { 172721afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 172821afaf18SBorislav Petkov 172921afaf18SBorislav Petkov /* 173021afaf18SBorislav Petkov * All newer Centaur CPUs support MCE broadcasting. Enable 173121afaf18SBorislav Petkov * synchronization with a one second timeout. 173221afaf18SBorislav Petkov */ 173321afaf18SBorislav Petkov if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || 173421afaf18SBorislav Petkov c->x86 > 6) { 173521afaf18SBorislav Petkov if (cfg->monarch_timeout < 0) 173621afaf18SBorislav Petkov cfg->monarch_timeout = USEC_PER_SEC; 173721afaf18SBorislav Petkov } 173821afaf18SBorislav Petkov } 173921afaf18SBorislav Petkov 174021afaf18SBorislav Petkov static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) 174121afaf18SBorislav Petkov { 174221afaf18SBorislav Petkov switch (c->x86_vendor) { 174321afaf18SBorislav Petkov case X86_VENDOR_INTEL: 174421afaf18SBorislav Petkov mce_intel_feature_init(c); 174521afaf18SBorislav Petkov mce_adjust_timer = cmci_intel_adjust_timer; 174621afaf18SBorislav Petkov break; 174721afaf18SBorislav Petkov 174821afaf18SBorislav Petkov case X86_VENDOR_AMD: { 174921afaf18SBorislav Petkov mce_amd_feature_init(c); 175021afaf18SBorislav Petkov break; 175121afaf18SBorislav Petkov } 175221afaf18SBorislav Petkov 175321afaf18SBorislav Petkov case X86_VENDOR_HYGON: 175421afaf18SBorislav Petkov mce_hygon_feature_init(c); 175521afaf18SBorislav Petkov break; 175621afaf18SBorislav Petkov 175721afaf18SBorislav Petkov case X86_VENDOR_CENTAUR: 175821afaf18SBorislav Petkov mce_centaur_feature_init(c); 175921afaf18SBorislav Petkov break; 176021afaf18SBorislav Petkov 176121afaf18SBorislav Petkov default: 176221afaf18SBorislav Petkov break; 176321afaf18SBorislav Petkov } 176421afaf18SBorislav Petkov } 176521afaf18SBorislav Petkov 176621afaf18SBorislav Petkov static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) 176721afaf18SBorislav Petkov { 176821afaf18SBorislav Petkov switch (c->x86_vendor) { 176921afaf18SBorislav Petkov case X86_VENDOR_INTEL: 177021afaf18SBorislav Petkov mce_intel_feature_clear(c); 177121afaf18SBorislav Petkov break; 177221afaf18SBorislav Petkov default: 177321afaf18SBorislav Petkov break; 177421afaf18SBorislav Petkov } 177521afaf18SBorislav Petkov } 177621afaf18SBorislav Petkov 177721afaf18SBorislav Petkov static void mce_start_timer(struct timer_list *t) 177821afaf18SBorislav Petkov { 177921afaf18SBorislav Petkov unsigned long iv = check_interval * HZ; 178021afaf18SBorislav Petkov 178121afaf18SBorislav Petkov if (mca_cfg.ignore_ce || !iv) 178221afaf18SBorislav Petkov return; 178321afaf18SBorislav Petkov 178421afaf18SBorislav Petkov this_cpu_write(mce_next_interval, iv); 178521afaf18SBorislav Petkov __start_timer(t, iv); 178621afaf18SBorislav Petkov } 178721afaf18SBorislav Petkov 178821afaf18SBorislav Petkov static void __mcheck_cpu_setup_timer(void) 178921afaf18SBorislav Petkov { 179021afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 179121afaf18SBorislav Petkov 179221afaf18SBorislav Petkov timer_setup(t, mce_timer_fn, TIMER_PINNED); 179321afaf18SBorislav Petkov } 179421afaf18SBorislav Petkov 179521afaf18SBorislav Petkov static void __mcheck_cpu_init_timer(void) 179621afaf18SBorislav Petkov { 179721afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 179821afaf18SBorislav Petkov 179921afaf18SBorislav Petkov timer_setup(t, mce_timer_fn, TIMER_PINNED); 180021afaf18SBorislav Petkov mce_start_timer(t); 180121afaf18SBorislav Petkov } 180221afaf18SBorislav Petkov 180321afaf18SBorislav Petkov /* Handle unconfigured int18 (should never happen) */ 180421afaf18SBorislav Petkov static void unexpected_machine_check(struct pt_regs *regs, long error_code) 180521afaf18SBorislav Petkov { 180621afaf18SBorislav Petkov pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", 180721afaf18SBorislav Petkov smp_processor_id()); 180821afaf18SBorislav Petkov } 180921afaf18SBorislav Petkov 181021afaf18SBorislav Petkov /* Call the installed machine check handler for this CPU setup. */ 181121afaf18SBorislav Petkov void (*machine_check_vector)(struct pt_regs *, long error_code) = 181221afaf18SBorislav Petkov unexpected_machine_check; 181321afaf18SBorislav Petkov 181421afaf18SBorislav Petkov dotraplinkage void do_mce(struct pt_regs *regs, long error_code) 181521afaf18SBorislav Petkov { 181621afaf18SBorislav Petkov machine_check_vector(regs, error_code); 181721afaf18SBorislav Petkov } 181821afaf18SBorislav Petkov 181921afaf18SBorislav Petkov /* 182021afaf18SBorislav Petkov * Called for each booted CPU to set up machine checks. 182121afaf18SBorislav Petkov * Must be called with preempt off: 182221afaf18SBorislav Petkov */ 182321afaf18SBorislav Petkov void mcheck_cpu_init(struct cpuinfo_x86 *c) 182421afaf18SBorislav Petkov { 182521afaf18SBorislav Petkov if (mca_cfg.disabled) 182621afaf18SBorislav Petkov return; 182721afaf18SBorislav Petkov 182821afaf18SBorislav Petkov if (__mcheck_cpu_ancient_init(c)) 182921afaf18SBorislav Petkov return; 183021afaf18SBorislav Petkov 183121afaf18SBorislav Petkov if (!mce_available(c)) 183221afaf18SBorislav Petkov return; 183321afaf18SBorislav Petkov 183421afaf18SBorislav Petkov if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { 183521afaf18SBorislav Petkov mca_cfg.disabled = 1; 183621afaf18SBorislav Petkov return; 183721afaf18SBorislav Petkov } 183821afaf18SBorislav Petkov 183921afaf18SBorislav Petkov if (mce_gen_pool_init()) { 184021afaf18SBorislav Petkov mca_cfg.disabled = 1; 184121afaf18SBorislav Petkov pr_emerg("Couldn't allocate MCE records pool!\n"); 184221afaf18SBorislav Petkov return; 184321afaf18SBorislav Petkov } 184421afaf18SBorislav Petkov 184521afaf18SBorislav Petkov machine_check_vector = do_machine_check; 184621afaf18SBorislav Petkov 184721afaf18SBorislav Petkov __mcheck_cpu_init_early(c); 184821afaf18SBorislav Petkov __mcheck_cpu_init_generic(); 184921afaf18SBorislav Petkov __mcheck_cpu_init_vendor(c); 185021afaf18SBorislav Petkov __mcheck_cpu_init_clear_banks(); 185121afaf18SBorislav Petkov __mcheck_cpu_setup_timer(); 185221afaf18SBorislav Petkov } 185321afaf18SBorislav Petkov 185421afaf18SBorislav Petkov /* 185521afaf18SBorislav Petkov * Called for each booted CPU to clear some machine checks opt-ins 185621afaf18SBorislav Petkov */ 185721afaf18SBorislav Petkov void mcheck_cpu_clear(struct cpuinfo_x86 *c) 185821afaf18SBorislav Petkov { 185921afaf18SBorislav Petkov if (mca_cfg.disabled) 186021afaf18SBorislav Petkov return; 186121afaf18SBorislav Petkov 186221afaf18SBorislav Petkov if (!mce_available(c)) 186321afaf18SBorislav Petkov return; 186421afaf18SBorislav Petkov 186521afaf18SBorislav Petkov /* 186621afaf18SBorislav Petkov * Possibly to clear general settings generic to x86 186721afaf18SBorislav Petkov * __mcheck_cpu_clear_generic(c); 186821afaf18SBorislav Petkov */ 186921afaf18SBorislav Petkov __mcheck_cpu_clear_vendor(c); 187021afaf18SBorislav Petkov 187121afaf18SBorislav Petkov } 187221afaf18SBorislav Petkov 187321afaf18SBorislav Petkov static void __mce_disable_bank(void *arg) 187421afaf18SBorislav Petkov { 187521afaf18SBorislav Petkov int bank = *((int *)arg); 187621afaf18SBorislav Petkov __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); 187721afaf18SBorislav Petkov cmci_disable_bank(bank); 187821afaf18SBorislav Petkov } 187921afaf18SBorislav Petkov 188021afaf18SBorislav Petkov void mce_disable_bank(int bank) 188121afaf18SBorislav Petkov { 188221afaf18SBorislav Petkov if (bank >= mca_cfg.banks) { 188321afaf18SBorislav Petkov pr_warn(FW_BUG 188421afaf18SBorislav Petkov "Ignoring request to disable invalid MCA bank %d.\n", 188521afaf18SBorislav Petkov bank); 188621afaf18SBorislav Petkov return; 188721afaf18SBorislav Petkov } 188821afaf18SBorislav Petkov set_bit(bank, mce_banks_ce_disabled); 188921afaf18SBorislav Petkov on_each_cpu(__mce_disable_bank, &bank, 1); 189021afaf18SBorislav Petkov } 189121afaf18SBorislav Petkov 189221afaf18SBorislav Petkov /* 189321afaf18SBorislav Petkov * mce=off Disables machine check 189421afaf18SBorislav Petkov * mce=no_cmci Disables CMCI 189521afaf18SBorislav Petkov * mce=no_lmce Disables LMCE 189621afaf18SBorislav Petkov * mce=dont_log_ce Clears corrected events silently, no log created for CEs. 189721afaf18SBorislav Petkov * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. 189821afaf18SBorislav Petkov * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) 189921afaf18SBorislav Petkov * monarchtimeout is how long to wait for other CPUs on machine 190021afaf18SBorislav Petkov * check, or 0 to not wait 190121afaf18SBorislav Petkov * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h 190221afaf18SBorislav Petkov and older. 190321afaf18SBorislav Petkov * mce=nobootlog Don't log MCEs from before booting. 190421afaf18SBorislav Petkov * mce=bios_cmci_threshold Don't program the CMCI threshold 190521afaf18SBorislav Petkov * mce=recovery force enable memcpy_mcsafe() 190621afaf18SBorislav Petkov */ 190721afaf18SBorislav Petkov static int __init mcheck_enable(char *str) 190821afaf18SBorislav Petkov { 190921afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 191021afaf18SBorislav Petkov 191121afaf18SBorislav Petkov if (*str == 0) { 191221afaf18SBorislav Petkov enable_p5_mce(); 191321afaf18SBorislav Petkov return 1; 191421afaf18SBorislav Petkov } 191521afaf18SBorislav Petkov if (*str == '=') 191621afaf18SBorislav Petkov str++; 191721afaf18SBorislav Petkov if (!strcmp(str, "off")) 191821afaf18SBorislav Petkov cfg->disabled = 1; 191921afaf18SBorislav Petkov else if (!strcmp(str, "no_cmci")) 192021afaf18SBorislav Petkov cfg->cmci_disabled = true; 192121afaf18SBorislav Petkov else if (!strcmp(str, "no_lmce")) 192221afaf18SBorislav Petkov cfg->lmce_disabled = 1; 192321afaf18SBorislav Petkov else if (!strcmp(str, "dont_log_ce")) 192421afaf18SBorislav Petkov cfg->dont_log_ce = true; 192521afaf18SBorislav Petkov else if (!strcmp(str, "ignore_ce")) 192621afaf18SBorislav Petkov cfg->ignore_ce = true; 192721afaf18SBorislav Petkov else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) 192821afaf18SBorislav Petkov cfg->bootlog = (str[0] == 'b'); 192921afaf18SBorislav Petkov else if (!strcmp(str, "bios_cmci_threshold")) 193021afaf18SBorislav Petkov cfg->bios_cmci_threshold = 1; 193121afaf18SBorislav Petkov else if (!strcmp(str, "recovery")) 193221afaf18SBorislav Petkov cfg->recovery = 1; 193321afaf18SBorislav Petkov else if (isdigit(str[0])) { 193421afaf18SBorislav Petkov if (get_option(&str, &cfg->tolerant) == 2) 193521afaf18SBorislav Petkov get_option(&str, &(cfg->monarch_timeout)); 193621afaf18SBorislav Petkov } else { 193721afaf18SBorislav Petkov pr_info("mce argument %s ignored. Please use /sys\n", str); 193821afaf18SBorislav Petkov return 0; 193921afaf18SBorislav Petkov } 194021afaf18SBorislav Petkov return 1; 194121afaf18SBorislav Petkov } 194221afaf18SBorislav Petkov __setup("mce", mcheck_enable); 194321afaf18SBorislav Petkov 194421afaf18SBorislav Petkov int __init mcheck_init(void) 194521afaf18SBorislav Petkov { 194621afaf18SBorislav Petkov mcheck_intel_therm_init(); 194721afaf18SBorislav Petkov mce_register_decode_chain(&first_nb); 194821afaf18SBorislav Petkov mce_register_decode_chain(&mce_srao_nb); 194921afaf18SBorislav Petkov mce_register_decode_chain(&mce_default_nb); 195021afaf18SBorislav Petkov mcheck_vendor_init_severity(); 195121afaf18SBorislav Petkov 195221afaf18SBorislav Petkov INIT_WORK(&mce_work, mce_gen_pool_process); 195321afaf18SBorislav Petkov init_irq_work(&mce_irq_work, mce_irq_work_cb); 195421afaf18SBorislav Petkov 195521afaf18SBorislav Petkov return 0; 195621afaf18SBorislav Petkov } 195721afaf18SBorislav Petkov 195821afaf18SBorislav Petkov /* 195921afaf18SBorislav Petkov * mce_syscore: PM support 196021afaf18SBorislav Petkov */ 196121afaf18SBorislav Petkov 196221afaf18SBorislav Petkov /* 196321afaf18SBorislav Petkov * Disable machine checks on suspend and shutdown. We can't really handle 196421afaf18SBorislav Petkov * them later. 196521afaf18SBorislav Petkov */ 196621afaf18SBorislav Petkov static void mce_disable_error_reporting(void) 196721afaf18SBorislav Petkov { 196821afaf18SBorislav Petkov int i; 196921afaf18SBorislav Petkov 197021afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 197121afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 197221afaf18SBorislav Petkov 197321afaf18SBorislav Petkov if (b->init) 197421afaf18SBorislav Petkov wrmsrl(msr_ops.ctl(i), 0); 197521afaf18SBorislav Petkov } 197621afaf18SBorislav Petkov return; 197721afaf18SBorislav Petkov } 197821afaf18SBorislav Petkov 197921afaf18SBorislav Petkov static void vendor_disable_error_reporting(void) 198021afaf18SBorislav Petkov { 198121afaf18SBorislav Petkov /* 198221afaf18SBorislav Petkov * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs 198321afaf18SBorislav Petkov * are socket-wide. 198421afaf18SBorislav Petkov * Disabling them for just a single offlined CPU is bad, since it will 198521afaf18SBorislav Petkov * inhibit reporting for all shared resources on the socket like the 198621afaf18SBorislav Petkov * last level cache (LLC), the integrated memory controller (iMC), etc. 198721afaf18SBorislav Petkov */ 198821afaf18SBorislav Petkov if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || 198921afaf18SBorislav Petkov boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || 199021afaf18SBorislav Petkov boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 199121afaf18SBorislav Petkov return; 199221afaf18SBorislav Petkov 199321afaf18SBorislav Petkov mce_disable_error_reporting(); 199421afaf18SBorislav Petkov } 199521afaf18SBorislav Petkov 199621afaf18SBorislav Petkov static int mce_syscore_suspend(void) 199721afaf18SBorislav Petkov { 199821afaf18SBorislav Petkov vendor_disable_error_reporting(); 199921afaf18SBorislav Petkov return 0; 200021afaf18SBorislav Petkov } 200121afaf18SBorislav Petkov 200221afaf18SBorislav Petkov static void mce_syscore_shutdown(void) 200321afaf18SBorislav Petkov { 200421afaf18SBorislav Petkov vendor_disable_error_reporting(); 200521afaf18SBorislav Petkov } 200621afaf18SBorislav Petkov 200721afaf18SBorislav Petkov /* 200821afaf18SBorislav Petkov * On resume clear all MCE state. Don't want to see leftovers from the BIOS. 200921afaf18SBorislav Petkov * Only one CPU is active at this time, the others get re-added later using 201021afaf18SBorislav Petkov * CPU hotplug: 201121afaf18SBorislav Petkov */ 201221afaf18SBorislav Petkov static void mce_syscore_resume(void) 201321afaf18SBorislav Petkov { 201421afaf18SBorislav Petkov __mcheck_cpu_init_generic(); 201521afaf18SBorislav Petkov __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); 201621afaf18SBorislav Petkov __mcheck_cpu_init_clear_banks(); 201721afaf18SBorislav Petkov } 201821afaf18SBorislav Petkov 201921afaf18SBorislav Petkov static struct syscore_ops mce_syscore_ops = { 202021afaf18SBorislav Petkov .suspend = mce_syscore_suspend, 202121afaf18SBorislav Petkov .shutdown = mce_syscore_shutdown, 202221afaf18SBorislav Petkov .resume = mce_syscore_resume, 202321afaf18SBorislav Petkov }; 202421afaf18SBorislav Petkov 202521afaf18SBorislav Petkov /* 202621afaf18SBorislav Petkov * mce_device: Sysfs support 202721afaf18SBorislav Petkov */ 202821afaf18SBorislav Petkov 202921afaf18SBorislav Petkov static void mce_cpu_restart(void *data) 203021afaf18SBorislav Petkov { 203121afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 203221afaf18SBorislav Petkov return; 203321afaf18SBorislav Petkov __mcheck_cpu_init_generic(); 203421afaf18SBorislav Petkov __mcheck_cpu_init_clear_banks(); 203521afaf18SBorislav Petkov __mcheck_cpu_init_timer(); 203621afaf18SBorislav Petkov } 203721afaf18SBorislav Petkov 203821afaf18SBorislav Petkov /* Reinit MCEs after user configuration changes */ 203921afaf18SBorislav Petkov static void mce_restart(void) 204021afaf18SBorislav Petkov { 204121afaf18SBorislav Petkov mce_timer_delete_all(); 204221afaf18SBorislav Petkov on_each_cpu(mce_cpu_restart, NULL, 1); 204321afaf18SBorislav Petkov } 204421afaf18SBorislav Petkov 204521afaf18SBorislav Petkov /* Toggle features for corrected errors */ 204621afaf18SBorislav Petkov static void mce_disable_cmci(void *data) 204721afaf18SBorislav Petkov { 204821afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 204921afaf18SBorislav Petkov return; 205021afaf18SBorislav Petkov cmci_clear(); 205121afaf18SBorislav Petkov } 205221afaf18SBorislav Petkov 205321afaf18SBorislav Petkov static void mce_enable_ce(void *all) 205421afaf18SBorislav Petkov { 205521afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 205621afaf18SBorislav Petkov return; 205721afaf18SBorislav Petkov cmci_reenable(); 205821afaf18SBorislav Petkov cmci_recheck(); 205921afaf18SBorislav Petkov if (all) 206021afaf18SBorislav Petkov __mcheck_cpu_init_timer(); 206121afaf18SBorislav Petkov } 206221afaf18SBorislav Petkov 206321afaf18SBorislav Petkov static struct bus_type mce_subsys = { 206421afaf18SBorislav Petkov .name = "machinecheck", 206521afaf18SBorislav Petkov .dev_name = "machinecheck", 206621afaf18SBorislav Petkov }; 206721afaf18SBorislav Petkov 206821afaf18SBorislav Petkov DEFINE_PER_CPU(struct device *, mce_device); 206921afaf18SBorislav Petkov 207021afaf18SBorislav Petkov static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) 207121afaf18SBorislav Petkov { 207221afaf18SBorislav Petkov return container_of(attr, struct mce_bank, attr); 207321afaf18SBorislav Petkov } 207421afaf18SBorislav Petkov 207521afaf18SBorislav Petkov static ssize_t show_bank(struct device *s, struct device_attribute *attr, 207621afaf18SBorislav Petkov char *buf) 207721afaf18SBorislav Petkov { 207821afaf18SBorislav Petkov return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); 207921afaf18SBorislav Petkov } 208021afaf18SBorislav Petkov 208121afaf18SBorislav Petkov static ssize_t set_bank(struct device *s, struct device_attribute *attr, 208221afaf18SBorislav Petkov const char *buf, size_t size) 208321afaf18SBorislav Petkov { 208421afaf18SBorislav Petkov u64 new; 208521afaf18SBorislav Petkov 208621afaf18SBorislav Petkov if (kstrtou64(buf, 0, &new) < 0) 208721afaf18SBorislav Petkov return -EINVAL; 208821afaf18SBorislav Petkov 208921afaf18SBorislav Petkov attr_to_bank(attr)->ctl = new; 209021afaf18SBorislav Petkov mce_restart(); 209121afaf18SBorislav Petkov 209221afaf18SBorislav Petkov return size; 209321afaf18SBorislav Petkov } 209421afaf18SBorislav Petkov 209521afaf18SBorislav Petkov static ssize_t set_ignore_ce(struct device *s, 209621afaf18SBorislav Petkov struct device_attribute *attr, 209721afaf18SBorislav Petkov const char *buf, size_t size) 209821afaf18SBorislav Petkov { 209921afaf18SBorislav Petkov u64 new; 210021afaf18SBorislav Petkov 210121afaf18SBorislav Petkov if (kstrtou64(buf, 0, &new) < 0) 210221afaf18SBorislav Petkov return -EINVAL; 210321afaf18SBorislav Petkov 210421afaf18SBorislav Petkov mutex_lock(&mce_sysfs_mutex); 210521afaf18SBorislav Petkov if (mca_cfg.ignore_ce ^ !!new) { 210621afaf18SBorislav Petkov if (new) { 210721afaf18SBorislav Petkov /* disable ce features */ 210821afaf18SBorislav Petkov mce_timer_delete_all(); 210921afaf18SBorislav Petkov on_each_cpu(mce_disable_cmci, NULL, 1); 211021afaf18SBorislav Petkov mca_cfg.ignore_ce = true; 211121afaf18SBorislav Petkov } else { 211221afaf18SBorislav Petkov /* enable ce features */ 211321afaf18SBorislav Petkov mca_cfg.ignore_ce = false; 211421afaf18SBorislav Petkov on_each_cpu(mce_enable_ce, (void *)1, 1); 211521afaf18SBorislav Petkov } 211621afaf18SBorislav Petkov } 211721afaf18SBorislav Petkov mutex_unlock(&mce_sysfs_mutex); 211821afaf18SBorislav Petkov 211921afaf18SBorislav Petkov return size; 212021afaf18SBorislav Petkov } 212121afaf18SBorislav Petkov 212221afaf18SBorislav Petkov static ssize_t set_cmci_disabled(struct device *s, 212321afaf18SBorislav Petkov struct device_attribute *attr, 212421afaf18SBorislav Petkov const char *buf, size_t size) 212521afaf18SBorislav Petkov { 212621afaf18SBorislav Petkov u64 new; 212721afaf18SBorislav Petkov 212821afaf18SBorislav Petkov if (kstrtou64(buf, 0, &new) < 0) 212921afaf18SBorislav Petkov return -EINVAL; 213021afaf18SBorislav Petkov 213121afaf18SBorislav Petkov mutex_lock(&mce_sysfs_mutex); 213221afaf18SBorislav Petkov if (mca_cfg.cmci_disabled ^ !!new) { 213321afaf18SBorislav Petkov if (new) { 213421afaf18SBorislav Petkov /* disable cmci */ 213521afaf18SBorislav Petkov on_each_cpu(mce_disable_cmci, NULL, 1); 213621afaf18SBorislav Petkov mca_cfg.cmci_disabled = true; 213721afaf18SBorislav Petkov } else { 213821afaf18SBorislav Petkov /* enable cmci */ 213921afaf18SBorislav Petkov mca_cfg.cmci_disabled = false; 214021afaf18SBorislav Petkov on_each_cpu(mce_enable_ce, NULL, 1); 214121afaf18SBorislav Petkov } 214221afaf18SBorislav Petkov } 214321afaf18SBorislav Petkov mutex_unlock(&mce_sysfs_mutex); 214421afaf18SBorislav Petkov 214521afaf18SBorislav Petkov return size; 214621afaf18SBorislav Petkov } 214721afaf18SBorislav Petkov 214821afaf18SBorislav Petkov static ssize_t store_int_with_restart(struct device *s, 214921afaf18SBorislav Petkov struct device_attribute *attr, 215021afaf18SBorislav Petkov const char *buf, size_t size) 215121afaf18SBorislav Petkov { 215221afaf18SBorislav Petkov unsigned long old_check_interval = check_interval; 215321afaf18SBorislav Petkov ssize_t ret = device_store_ulong(s, attr, buf, size); 215421afaf18SBorislav Petkov 215521afaf18SBorislav Petkov if (check_interval == old_check_interval) 215621afaf18SBorislav Petkov return ret; 215721afaf18SBorislav Petkov 215821afaf18SBorislav Petkov mutex_lock(&mce_sysfs_mutex); 215921afaf18SBorislav Petkov mce_restart(); 216021afaf18SBorislav Petkov mutex_unlock(&mce_sysfs_mutex); 216121afaf18SBorislav Petkov 216221afaf18SBorislav Petkov return ret; 216321afaf18SBorislav Petkov } 216421afaf18SBorislav Petkov 216521afaf18SBorislav Petkov static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); 216621afaf18SBorislav Petkov static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); 216721afaf18SBorislav Petkov static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); 216821afaf18SBorislav Petkov 216921afaf18SBorislav Petkov static struct dev_ext_attribute dev_attr_check_interval = { 217021afaf18SBorislav Petkov __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), 217121afaf18SBorislav Petkov &check_interval 217221afaf18SBorislav Petkov }; 217321afaf18SBorislav Petkov 217421afaf18SBorislav Petkov static struct dev_ext_attribute dev_attr_ignore_ce = { 217521afaf18SBorislav Petkov __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), 217621afaf18SBorislav Petkov &mca_cfg.ignore_ce 217721afaf18SBorislav Petkov }; 217821afaf18SBorislav Petkov 217921afaf18SBorislav Petkov static struct dev_ext_attribute dev_attr_cmci_disabled = { 218021afaf18SBorislav Petkov __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), 218121afaf18SBorislav Petkov &mca_cfg.cmci_disabled 218221afaf18SBorislav Petkov }; 218321afaf18SBorislav Petkov 218421afaf18SBorislav Petkov static struct device_attribute *mce_device_attrs[] = { 218521afaf18SBorislav Petkov &dev_attr_tolerant.attr, 218621afaf18SBorislav Petkov &dev_attr_check_interval.attr, 218721afaf18SBorislav Petkov #ifdef CONFIG_X86_MCELOG_LEGACY 218821afaf18SBorislav Petkov &dev_attr_trigger, 218921afaf18SBorislav Petkov #endif 219021afaf18SBorislav Petkov &dev_attr_monarch_timeout.attr, 219121afaf18SBorislav Petkov &dev_attr_dont_log_ce.attr, 219221afaf18SBorislav Petkov &dev_attr_ignore_ce.attr, 219321afaf18SBorislav Petkov &dev_attr_cmci_disabled.attr, 219421afaf18SBorislav Petkov NULL 219521afaf18SBorislav Petkov }; 219621afaf18SBorislav Petkov 219721afaf18SBorislav Petkov static cpumask_var_t mce_device_initialized; 219821afaf18SBorislav Petkov 219921afaf18SBorislav Petkov static void mce_device_release(struct device *dev) 220021afaf18SBorislav Petkov { 220121afaf18SBorislav Petkov kfree(dev); 220221afaf18SBorislav Petkov } 220321afaf18SBorislav Petkov 220421afaf18SBorislav Petkov /* Per cpu device init. All of the cpus still share the same ctrl bank: */ 220521afaf18SBorislav Petkov static int mce_device_create(unsigned int cpu) 220621afaf18SBorislav Petkov { 220721afaf18SBorislav Petkov struct device *dev; 220821afaf18SBorislav Petkov int err; 220921afaf18SBorislav Petkov int i, j; 221021afaf18SBorislav Petkov 221121afaf18SBorislav Petkov if (!mce_available(&boot_cpu_data)) 221221afaf18SBorislav Petkov return -EIO; 221321afaf18SBorislav Petkov 221421afaf18SBorislav Petkov dev = per_cpu(mce_device, cpu); 221521afaf18SBorislav Petkov if (dev) 221621afaf18SBorislav Petkov return 0; 221721afaf18SBorislav Petkov 221821afaf18SBorislav Petkov dev = kzalloc(sizeof(*dev), GFP_KERNEL); 221921afaf18SBorislav Petkov if (!dev) 222021afaf18SBorislav Petkov return -ENOMEM; 222121afaf18SBorislav Petkov dev->id = cpu; 222221afaf18SBorislav Petkov dev->bus = &mce_subsys; 222321afaf18SBorislav Petkov dev->release = &mce_device_release; 222421afaf18SBorislav Petkov 222521afaf18SBorislav Petkov err = device_register(dev); 222621afaf18SBorislav Petkov if (err) { 222721afaf18SBorislav Petkov put_device(dev); 222821afaf18SBorislav Petkov return err; 222921afaf18SBorislav Petkov } 223021afaf18SBorislav Petkov 223121afaf18SBorislav Petkov for (i = 0; mce_device_attrs[i]; i++) { 223221afaf18SBorislav Petkov err = device_create_file(dev, mce_device_attrs[i]); 223321afaf18SBorislav Petkov if (err) 223421afaf18SBorislav Petkov goto error; 223521afaf18SBorislav Petkov } 223621afaf18SBorislav Petkov for (j = 0; j < mca_cfg.banks; j++) { 223721afaf18SBorislav Petkov err = device_create_file(dev, &mce_banks[j].attr); 223821afaf18SBorislav Petkov if (err) 223921afaf18SBorislav Petkov goto error2; 224021afaf18SBorislav Petkov } 224121afaf18SBorislav Petkov cpumask_set_cpu(cpu, mce_device_initialized); 224221afaf18SBorislav Petkov per_cpu(mce_device, cpu) = dev; 224321afaf18SBorislav Petkov 224421afaf18SBorislav Petkov return 0; 224521afaf18SBorislav Petkov error2: 224621afaf18SBorislav Petkov while (--j >= 0) 224721afaf18SBorislav Petkov device_remove_file(dev, &mce_banks[j].attr); 224821afaf18SBorislav Petkov error: 224921afaf18SBorislav Petkov while (--i >= 0) 225021afaf18SBorislav Petkov device_remove_file(dev, mce_device_attrs[i]); 225121afaf18SBorislav Petkov 225221afaf18SBorislav Petkov device_unregister(dev); 225321afaf18SBorislav Petkov 225421afaf18SBorislav Petkov return err; 225521afaf18SBorislav Petkov } 225621afaf18SBorislav Petkov 225721afaf18SBorislav Petkov static void mce_device_remove(unsigned int cpu) 225821afaf18SBorislav Petkov { 225921afaf18SBorislav Petkov struct device *dev = per_cpu(mce_device, cpu); 226021afaf18SBorislav Petkov int i; 226121afaf18SBorislav Petkov 226221afaf18SBorislav Petkov if (!cpumask_test_cpu(cpu, mce_device_initialized)) 226321afaf18SBorislav Petkov return; 226421afaf18SBorislav Petkov 226521afaf18SBorislav Petkov for (i = 0; mce_device_attrs[i]; i++) 226621afaf18SBorislav Petkov device_remove_file(dev, mce_device_attrs[i]); 226721afaf18SBorislav Petkov 226821afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) 226921afaf18SBorislav Petkov device_remove_file(dev, &mce_banks[i].attr); 227021afaf18SBorislav Petkov 227121afaf18SBorislav Petkov device_unregister(dev); 227221afaf18SBorislav Petkov cpumask_clear_cpu(cpu, mce_device_initialized); 227321afaf18SBorislav Petkov per_cpu(mce_device, cpu) = NULL; 227421afaf18SBorislav Petkov } 227521afaf18SBorislav Petkov 227621afaf18SBorislav Petkov /* Make sure there are no machine checks on offlined CPUs. */ 227721afaf18SBorislav Petkov static void mce_disable_cpu(void) 227821afaf18SBorislav Petkov { 227921afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 228021afaf18SBorislav Petkov return; 228121afaf18SBorislav Petkov 228221afaf18SBorislav Petkov if (!cpuhp_tasks_frozen) 228321afaf18SBorislav Petkov cmci_clear(); 228421afaf18SBorislav Petkov 228521afaf18SBorislav Petkov vendor_disable_error_reporting(); 228621afaf18SBorislav Petkov } 228721afaf18SBorislav Petkov 228821afaf18SBorislav Petkov static void mce_reenable_cpu(void) 228921afaf18SBorislav Petkov { 229021afaf18SBorislav Petkov int i; 229121afaf18SBorislav Petkov 229221afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 229321afaf18SBorislav Petkov return; 229421afaf18SBorislav Petkov 229521afaf18SBorislav Petkov if (!cpuhp_tasks_frozen) 229621afaf18SBorislav Petkov cmci_reenable(); 229721afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 229821afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 229921afaf18SBorislav Petkov 230021afaf18SBorislav Petkov if (b->init) 230121afaf18SBorislav Petkov wrmsrl(msr_ops.ctl(i), b->ctl); 230221afaf18SBorislav Petkov } 230321afaf18SBorislav Petkov } 230421afaf18SBorislav Petkov 230521afaf18SBorislav Petkov static int mce_cpu_dead(unsigned int cpu) 230621afaf18SBorislav Petkov { 230721afaf18SBorislav Petkov mce_intel_hcpu_update(cpu); 230821afaf18SBorislav Petkov 230921afaf18SBorislav Petkov /* intentionally ignoring frozen here */ 231021afaf18SBorislav Petkov if (!cpuhp_tasks_frozen) 231121afaf18SBorislav Petkov cmci_rediscover(); 231221afaf18SBorislav Petkov return 0; 231321afaf18SBorislav Petkov } 231421afaf18SBorislav Petkov 231521afaf18SBorislav Petkov static int mce_cpu_online(unsigned int cpu) 231621afaf18SBorislav Petkov { 231721afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 231821afaf18SBorislav Petkov int ret; 231921afaf18SBorislav Petkov 232021afaf18SBorislav Petkov mce_device_create(cpu); 232121afaf18SBorislav Petkov 232221afaf18SBorislav Petkov ret = mce_threshold_create_device(cpu); 232321afaf18SBorislav Petkov if (ret) { 232421afaf18SBorislav Petkov mce_device_remove(cpu); 232521afaf18SBorislav Petkov return ret; 232621afaf18SBorislav Petkov } 232721afaf18SBorislav Petkov mce_reenable_cpu(); 232821afaf18SBorislav Petkov mce_start_timer(t); 232921afaf18SBorislav Petkov return 0; 233021afaf18SBorislav Petkov } 233121afaf18SBorislav Petkov 233221afaf18SBorislav Petkov static int mce_cpu_pre_down(unsigned int cpu) 233321afaf18SBorislav Petkov { 233421afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 233521afaf18SBorislav Petkov 233621afaf18SBorislav Petkov mce_disable_cpu(); 233721afaf18SBorislav Petkov del_timer_sync(t); 233821afaf18SBorislav Petkov mce_threshold_remove_device(cpu); 233921afaf18SBorislav Petkov mce_device_remove(cpu); 234021afaf18SBorislav Petkov return 0; 234121afaf18SBorislav Petkov } 234221afaf18SBorislav Petkov 234321afaf18SBorislav Petkov static __init void mce_init_banks(void) 234421afaf18SBorislav Petkov { 234521afaf18SBorislav Petkov int i; 234621afaf18SBorislav Petkov 234721afaf18SBorislav Petkov for (i = 0; i < mca_cfg.banks; i++) { 234821afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 234921afaf18SBorislav Petkov struct device_attribute *a = &b->attr; 235021afaf18SBorislav Petkov 235121afaf18SBorislav Petkov sysfs_attr_init(&a->attr); 235221afaf18SBorislav Petkov a->attr.name = b->attrname; 235321afaf18SBorislav Petkov snprintf(b->attrname, ATTR_LEN, "bank%d", i); 235421afaf18SBorislav Petkov 235521afaf18SBorislav Petkov a->attr.mode = 0644; 235621afaf18SBorislav Petkov a->show = show_bank; 235721afaf18SBorislav Petkov a->store = set_bank; 235821afaf18SBorislav Petkov } 235921afaf18SBorislav Petkov } 236021afaf18SBorislav Petkov 236121afaf18SBorislav Petkov static __init int mcheck_init_device(void) 236221afaf18SBorislav Petkov { 236321afaf18SBorislav Petkov int err; 236421afaf18SBorislav Petkov 236521afaf18SBorislav Petkov /* 236621afaf18SBorislav Petkov * Check if we have a spare virtual bit. This will only become 236721afaf18SBorislav Petkov * a problem if/when we move beyond 5-level page tables. 236821afaf18SBorislav Petkov */ 236921afaf18SBorislav Petkov MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63); 237021afaf18SBorislav Petkov 237121afaf18SBorislav Petkov if (!mce_available(&boot_cpu_data)) { 237221afaf18SBorislav Petkov err = -EIO; 237321afaf18SBorislav Petkov goto err_out; 237421afaf18SBorislav Petkov } 237521afaf18SBorislav Petkov 237621afaf18SBorislav Petkov if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { 237721afaf18SBorislav Petkov err = -ENOMEM; 237821afaf18SBorislav Petkov goto err_out; 237921afaf18SBorislav Petkov } 238021afaf18SBorislav Petkov 238121afaf18SBorislav Petkov mce_init_banks(); 238221afaf18SBorislav Petkov 238321afaf18SBorislav Petkov err = subsys_system_register(&mce_subsys, NULL); 238421afaf18SBorislav Petkov if (err) 238521afaf18SBorislav Petkov goto err_out_mem; 238621afaf18SBorislav Petkov 238721afaf18SBorislav Petkov err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL, 238821afaf18SBorislav Petkov mce_cpu_dead); 238921afaf18SBorislav Petkov if (err) 239021afaf18SBorislav Petkov goto err_out_mem; 239121afaf18SBorislav Petkov 239221afaf18SBorislav Petkov err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", 239321afaf18SBorislav Petkov mce_cpu_online, mce_cpu_pre_down); 239421afaf18SBorislav Petkov if (err < 0) 239521afaf18SBorislav Petkov goto err_out_online; 239621afaf18SBorislav Petkov 239721afaf18SBorislav Petkov register_syscore_ops(&mce_syscore_ops); 239821afaf18SBorislav Petkov 239921afaf18SBorislav Petkov return 0; 240021afaf18SBorislav Petkov 240121afaf18SBorislav Petkov err_out_online: 240221afaf18SBorislav Petkov cpuhp_remove_state(CPUHP_X86_MCE_DEAD); 240321afaf18SBorislav Petkov 240421afaf18SBorislav Petkov err_out_mem: 240521afaf18SBorislav Petkov free_cpumask_var(mce_device_initialized); 240621afaf18SBorislav Petkov 240721afaf18SBorislav Petkov err_out: 240821afaf18SBorislav Petkov pr_err("Unable to init MCE device (rc: %d)\n", err); 240921afaf18SBorislav Petkov 241021afaf18SBorislav Petkov return err; 241121afaf18SBorislav Petkov } 241221afaf18SBorislav Petkov device_initcall_sync(mcheck_init_device); 241321afaf18SBorislav Petkov 241421afaf18SBorislav Petkov /* 241521afaf18SBorislav Petkov * Old style boot options parsing. Only for compatibility. 241621afaf18SBorislav Petkov */ 241721afaf18SBorislav Petkov static int __init mcheck_disable(char *str) 241821afaf18SBorislav Petkov { 241921afaf18SBorislav Petkov mca_cfg.disabled = 1; 242021afaf18SBorislav Petkov return 1; 242121afaf18SBorislav Petkov } 242221afaf18SBorislav Petkov __setup("nomce", mcheck_disable); 242321afaf18SBorislav Petkov 242421afaf18SBorislav Petkov #ifdef CONFIG_DEBUG_FS 242521afaf18SBorislav Petkov struct dentry *mce_get_debugfs_dir(void) 242621afaf18SBorislav Petkov { 242721afaf18SBorislav Petkov static struct dentry *dmce; 242821afaf18SBorislav Petkov 242921afaf18SBorislav Petkov if (!dmce) 243021afaf18SBorislav Petkov dmce = debugfs_create_dir("mce", NULL); 243121afaf18SBorislav Petkov 243221afaf18SBorislav Petkov return dmce; 243321afaf18SBorislav Petkov } 243421afaf18SBorislav Petkov 243521afaf18SBorislav Petkov static void mce_reset(void) 243621afaf18SBorislav Petkov { 243721afaf18SBorislav Petkov cpu_missing = 0; 243821afaf18SBorislav Petkov atomic_set(&mce_fake_panicked, 0); 243921afaf18SBorislav Petkov atomic_set(&mce_executing, 0); 244021afaf18SBorislav Petkov atomic_set(&mce_callin, 0); 244121afaf18SBorislav Petkov atomic_set(&global_nwo, 0); 244221afaf18SBorislav Petkov } 244321afaf18SBorislav Petkov 244421afaf18SBorislav Petkov static int fake_panic_get(void *data, u64 *val) 244521afaf18SBorislav Petkov { 244621afaf18SBorislav Petkov *val = fake_panic; 244721afaf18SBorislav Petkov return 0; 244821afaf18SBorislav Petkov } 244921afaf18SBorislav Petkov 245021afaf18SBorislav Petkov static int fake_panic_set(void *data, u64 val) 245121afaf18SBorislav Petkov { 245221afaf18SBorislav Petkov mce_reset(); 245321afaf18SBorislav Petkov fake_panic = val; 245421afaf18SBorislav Petkov return 0; 245521afaf18SBorislav Petkov } 245621afaf18SBorislav Petkov 245721afaf18SBorislav Petkov DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, 245821afaf18SBorislav Petkov fake_panic_set, "%llu\n"); 245921afaf18SBorislav Petkov 246021afaf18SBorislav Petkov static int __init mcheck_debugfs_init(void) 246121afaf18SBorislav Petkov { 246221afaf18SBorislav Petkov struct dentry *dmce, *ffake_panic; 246321afaf18SBorislav Petkov 246421afaf18SBorislav Petkov dmce = mce_get_debugfs_dir(); 246521afaf18SBorislav Petkov if (!dmce) 246621afaf18SBorislav Petkov return -ENOMEM; 246721afaf18SBorislav Petkov ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL, 246821afaf18SBorislav Petkov &fake_panic_fops); 246921afaf18SBorislav Petkov if (!ffake_panic) 247021afaf18SBorislav Petkov return -ENOMEM; 247121afaf18SBorislav Petkov 247221afaf18SBorislav Petkov return 0; 247321afaf18SBorislav Petkov } 247421afaf18SBorislav Petkov #else 247521afaf18SBorislav Petkov static int __init mcheck_debugfs_init(void) { return -EINVAL; } 247621afaf18SBorislav Petkov #endif 247721afaf18SBorislav Petkov 247821afaf18SBorislav Petkov DEFINE_STATIC_KEY_FALSE(mcsafe_key); 247921afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mcsafe_key); 248021afaf18SBorislav Petkov 248121afaf18SBorislav Petkov static int __init mcheck_late_init(void) 248221afaf18SBorislav Petkov { 248321afaf18SBorislav Petkov if (mca_cfg.recovery) 248421afaf18SBorislav Petkov static_branch_inc(&mcsafe_key); 248521afaf18SBorislav Petkov 248621afaf18SBorislav Petkov mcheck_debugfs_init(); 248721afaf18SBorislav Petkov cec_init(); 248821afaf18SBorislav Petkov 248921afaf18SBorislav Petkov /* 249021afaf18SBorislav Petkov * Flush out everything that has been logged during early boot, now that 249121afaf18SBorislav Petkov * everything has been initialized (workqueues, decoders, ...). 249221afaf18SBorislav Petkov */ 249321afaf18SBorislav Petkov mce_schedule_work(); 249421afaf18SBorislav Petkov 249521afaf18SBorislav Petkov return 0; 249621afaf18SBorislav Petkov } 249721afaf18SBorislav Petkov late_initcall(mcheck_late_init); 2498