// SPDX-License-Identifier: GPL-2.0-only
/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/ras.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/set_memory.h>
#include <linux/sync_core.h>
#include <linux/task_work.h>
#include <linux/hardirq.h>

#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>

#include "internal.h"

/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

#define SPINUNIT 100	/* 100ns */

DEFINE_PER_CPU(unsigned, mce_exception_count);

DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

struct mce_bank {
	u64	ctl;	/* subevents to enable */
	bool	init;	/* initialise bank? */
};
static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
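/*
 * Illustrative sketch (not used anywhere in this file): how the per-CPU
 * bank array above is meant to be consumed.  A bank whose ->init flag is
 * clear is left alone entirely by the init code; otherwise ->ctl is the
 * subevent mask that gets programmed into the bank's CTL MSR.  The helper
 * name is hypothetical; only mce_banks_array is real.
 */
static inline u64 mce_bank_example_ctl(int bank)
{
	struct mce_bank *b = &this_cpu_ptr(mce_banks_array)[bank];

	return b->init ? b->ctl : 0;
}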
#define ATTR_LEN		16
/* One object for each MCE bank, shared by all CPUs */
struct mce_bank_dev {
	struct device_attribute	attr;			/* device attribute */
	char			attrname[ATTR_LEN];	/* attribute name */
	u8			bank;			/* bank number */
};
static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];

struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
	.bootlog  = -1,
	/*
	 * Tolerant levels:
	 * 0: always panic on uncorrected errors, log corrected errors
	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
	 * 3: never panic or SIGBUS, log all errors (for testing only)
	 */
	.tolerant = 1,
	.monarch_timeout = -1
};

static DEFINE_PER_CPU(struct mce, mces_seen);
static unsigned long mce_need_notify;
static int cpu_missing;

/*
 * MCA banks polled by the periodic polling timer for corrected events.
 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

/*
 * MCA banks controlled through firmware first for corrected errors.
 * This is a global list of banks for which we won't enable CMCI and we
 * won't poll. Firmware controls these banks and is responsible for
 * reporting corrected errors through GHES. Uncorrected/recoverable
 * errors are still notified through a machine check.
 */
mce_banks_t mce_banks_ce_disabled;

static struct work_struct mce_work;
static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
noinstr void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	/* need the internal __ version to avoid deadlocks */
	m->time = __ktime_get_real_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);

	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
		m->ppin = __rdmsr(MSR_PPIN);
	else if (this_cpu_has(X86_FEATURE_AMD_PPIN))
		m->ppin = __rdmsr(MSR_AMD_PPIN);

	m->microcode = boot_cpu_data.microcode;
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

void mce_log(struct mce *m)
{
	if (!mce_gen_pool_add(m))
		irq_work_queue(&mce_irq_work);
}
EXPORT_SYMBOL_GPL(mce_log);

void mce_register_decode_chain(struct notifier_block *nb)
{
	if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
		return;

	blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
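/*
 * Example consumer of the decode chain above (hypothetical, for
 * illustration only -- real users live in the EDAC and RAS drivers):
 *
 *	static int example_decode(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct mce *m = (struct mce *)data;
 *
 *		if (!m)
 *			return NOTIFY_DONE;
 *
 *		pr_info("decoded MCE on CPU %d, bank %d\n", m->extcpu, m->bank);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call	= example_decode,
 *		.priority	= MCE_PRIO_EDAC,
 *	};
 *
 * followed by mce_register_decode_chain(&example_nb) at driver init and
 * mce_unregister_decode_chain(&example_nb) at teardown.  Note the
 * WARN_ON() above: it rejects priorities that fall between
 * MCE_PRIO_MCELOG and MCE_PRIO_EDAC.
 */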
static inline u32 ctl_reg(int bank)
{
	return MSR_IA32_MCx_CTL(bank);
}

static inline u32 status_reg(int bank)
{
	return MSR_IA32_MCx_STATUS(bank);
}

static inline u32 addr_reg(int bank)
{
	return MSR_IA32_MCx_ADDR(bank);
}

static inline u32 misc_reg(int bank)
{
	return MSR_IA32_MCx_MISC(bank);
}

static inline u32 smca_ctl_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_CTL(bank);
}

static inline u32 smca_status_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_STATUS(bank);
}

static inline u32 smca_addr_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_ADDR(bank);
}

static inline u32 smca_misc_reg(int bank)
{
	return MSR_AMD64_SMCA_MCx_MISC(bank);
}

struct mca_msr_regs msr_ops = {
	.ctl	= ctl_reg,
	.status	= status_reg,
	.addr	= addr_reg,
	.misc	= misc_reg
};
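/*
 * For reference (constants per asm/msr-index.h at the time of writing,
 * worked out here for illustration): the legacy layout packs four MSRs
 * per bank starting at MSR_IA32_MC0_CTL == 0x400, so e.g.
 * MSR_IA32_MCx_STATUS(2) == 0x401 + 4 * 2 == 0x409.  The AMD SMCA layout
 * uses a 16-MSR stride from MSR_AMD64_SMCA_MC0_CTL == 0xc0002000, so
 * MSR_AMD64_SMCA_MCx_STATUS(2) == 0xc0002021.  The msr_ops table above
 * defaults to the legacy accessors; SMCA-capable systems switch it to
 * the smca_*_reg() variants during init (not in this section).
 */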
static void __print_mce(struct mce *m)
{
	pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
		 m->extcpu,
		 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
		 m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);
	if (m->ppin)
		pr_cont("PPIN %llx ", m->ppin);

	if (mce_flags.smca) {
		if (m->synd)
			pr_cont("SYND %llx ", m->synd);
		if (m->ipid)
			pr_cont("IPID %llx ", m->ipid);
	}

	pr_cont("\n");

	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		m->microcode);
}

static void print_mce(struct mce *m)
{
	__print_mce(m);

	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_panicked;

static int fake_panic;
static atomic_t mce_fake_panicked;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mca_cfg.panic_timeout;
	panic("Panicking machine check CPU died");
}
static void mce_panic(const char *msg, struct mce *final, char *exp)
{
	int apei_err = 0;
	struct llist_node *pending;
	struct mce_evt_llist *l;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_panicked) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_panicked) > 1)
			return;
	}
	pending = mce_gen_pool_prepare_records();
	/* First print corrected ones that are still unlogged */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	llist_for_each_entry(l, pending, llnode) {
		struct mce *m = &l->mce;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || mce_cmp(m, final)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mca_cfg.panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == mca_cfg.rip_msr)
		return offsetof(struct mce, ip);
	if (msr == msr_ops.status(bank))
		return offsetof(struct mce, status);
	if (msr == msr_ops.addr(bank))
		return offsetof(struct mce, addr);
	if (msr == msr_ops.misc(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}
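/*
 * Sketch of how the injection plumbing above is exercised (the real
 * driver is the mce-inject machinery, not this file; the values below
 * are purely illustrative):
 *
 *	struct mce *i = this_cpu_ptr(&injectm);
 *
 *	i->bank     = 1;
 *	i->status   = MCI_STATUS_VAL | MCI_STATUS_UC;
 *	i->finished = 1;
 *
 * With ->finished set, the mce_rdmsrl()/mce_wrmsrl() wrappers below stop
 * touching real MSRs and instead use msr_to_offset() to redirect the
 * access to the matching field of this per-CPU struct mce, so
 * mce_rdmsrl(msr_ops.status(1)) would return i->status.
 */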
__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
				      struct pt_regs *regs, int trapnr,
				      unsigned long error_code,
				      unsigned long fault_addr)
{
	pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
		 (unsigned int)regs->cx, regs->ip, (void *)regs->ip);

	show_stack_regs(regs);

	panic("MCA architectural violation!\n");

	while (true)
		cpu_relax();

	return true;
}

/* MSR access wrappers used for error injection */
static noinstr u64 mce_rdmsrl(u32 msr)
{
	DECLARE_ARGS(val, low, high);

	if (__this_cpu_read(injectm.finished)) {
		int offset;
		u64 ret;

		instrumentation_begin();

		offset = msr_to_offset(msr);
		if (offset < 0)
			ret = 0;
		else
			ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);

		instrumentation_end();

		return ret;
	}

	/*
	 * RDMSR on MCA MSRs should not fault. If they do, this is very much an
	 * architectural violation and needs to be reported to hw vendor. Panic
	 * the box to not allow any further progress.
	 */
	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}
__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
				      struct pt_regs *regs, int trapnr,
				      unsigned long error_code,
				      unsigned long fault_addr)
{
	pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
		 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
		 regs->ip, (void *)regs->ip);

	show_stack_regs(regs);

	panic("MCA architectural violation!\n");

	while (true)
		cpu_relax();

	return true;
}

static noinstr void mce_wrmsrl(u32 msr, u64 v)
{
	u32 low, high;

	if (__this_cpu_read(injectm.finished)) {
		int offset;

		instrumentation_begin();

		offset = msr_to_offset(msr);
		if (offset >= 0)
			*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;

		instrumentation_end();

		return;
	}

	low  = (u32)v;
	high = (u32)(v >> 32);

	/* See comment in mce_rdmsrl() */
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}
/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (mca_cfg.rip_msr)
			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
	}
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_gen_pool_empty())
		schedule_work(&mce_work);
}

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_schedule_work();
}

/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_ADDRV))
		return 0;

	/* Checks after this one are Intel/Zhaoxin-specific: */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return 1;

	if (!(m->status & MCI_STATUS_MISCV))
		return 0;

	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;

	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(mce_usable_address);
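/*
 * Worked example for the MISC checks above (illustrative value):
 * MCI_MISC_ADDR_LSB() is MISC[5:0], the position of the least
 * significant *valid* address bit; MCI_MISC_ADDR_MODE() is MISC[8:6].
 * A MISC word with LSB == 12 and mode == MCI_MISC_ADDR_PHYS thus
 * advertises a physical address valid down to 4K granularity, which is
 * exactly what mce_usable_address() accepts: LSB <= PAGE_SHIFT and a
 * physical address mode.
 */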
bool mce_is_memory_error(struct mce *m)
{
	switch (m->cpuvendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		return amd_mce_is_memory_error(m);

	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *
		 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
		 * indicating a memory error. Bit 8 is used for indicating a
		 * cache hierarchy error. The combination of bit 2 and bit 3
		 * is used for indicating a `generic' cache hierarchy error.
		 * But we can't just blindly check the above bits, because if
		 * bit 11 is set, then it is a bus/interconnect error - and
		 * either way the above bits just give more detail on what
		 * bus/interconnect error happened. Note that bit 12 can be
		 * ignored, as it's the "filter" bit.
		 */
		return (m->status & 0xef80) == BIT(7) ||
		       (m->status & 0xef00) == BIT(8) ||
		       (m->status & 0xeffc) == 0xc;

	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
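/*
 * Worked example for the Intel/Zhaoxin classification above: an MCACOD
 * (low 16 bits of MCi_STATUS) of 0x0090, binary 0000 0000 1001 0000,
 * has bit 7 set and bits 8-11/13-15 clear, so
 * (status & 0xef80) == BIT(7) matches and it is classed as a memory
 * error.  An MCACOD of 0x0135, binary 0000 0001 0011 0101, has bit 8
 * set instead and matches the cache hierarchy test.  Bit 12 is left out
 * of all three masks because it is the "filter" bit.
 */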
static bool whole_page(struct mce *m)
{
	if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
		return true;

	return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
}

bool mce_is_correctable(struct mce *m)
{
	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
		return false;

	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
		return false;

	if (m->status & MCI_STATUS_UC)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mce_is_correctable);

static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	/* Emit the trace record: */
	trace_mce_record(m);

	set_bit(0, &mce_need_notify);

	mce_notify_irq();

	return NOTIFY_DONE;
}

static struct notifier_block early_nb = {
	.notifier_call	= mce_early_notifier,
	.priority	= MCE_PRIO_EARLY,
};
static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned long pfn;

	if (!mce || !mce_usable_address(mce))
		return NOTIFY_DONE;

	if (mce->severity != MCE_AO_SEVERITY &&
	    mce->severity != MCE_DEFERRED_SEVERITY)
		return NOTIFY_DONE;

	pfn = mce->addr >> PAGE_SHIFT;
	if (!memory_failure(pfn, 0)) {
		set_mce_nospec(pfn, whole_page(mce));
		mce->kflags |= MCE_HANDLED_UC;
	}

	return NOTIFY_OK;
}

static struct notifier_block mce_uc_nb = {
	.notifier_call	= uc_decode_notifier,
	.priority	= MCE_PRIO_UC,
};

static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *m = (struct mce *)data;

	if (!m)
		return NOTIFY_DONE;

	if (mca_cfg.print_all || !m->kflags)
		__print_mce(m);

	return NOTIFY_DONE;
}

static struct notifier_block mce_default_nb = {
	.notifier_call	= mce_default_notifier,
	/* lowest prio, we want it to run last. */
	.priority	= MCE_PRIO_LOWEST,
};
/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(msr_ops.misc(i));

	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(msr_ops.addr(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}

		/*
		 * Extract [55:<lsb>] where lsb is the least significant
		 * *valid* bit of the address bits.
		 */
		if (mce_flags.smca) {
			u8 lsb = (m->addr >> 56) & 0x3f;

			m->addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));

		if (m->status & MCI_STATUS_SYNDV)
			m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
	}
}
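/*
 * Worked example for the SMCA masking above (illustrative value): for an
 * MCA_ADDR of 0x0c00001234567abc, bits [61:56] yield lsb == 0x0c, i.e.
 * the address is only meaningful from bit 12 upwards, and the
 * GENMASK_ULL(55, 12) masking leaves 0x1234567000 -- the 4K page
 * containing the error.
 */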
DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled errors
 * here. However this would be quite problematic -- we would need to
 * reimplement the Monarch handling and it would mess up the exclusion
 * between the exception handler and the poll handler -- so we skip this
 * for now. These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely it will not
 * fully execute the machine check handler either.
 */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
	bool error_seen = false;
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	if (flags & MCP_TIMESTAMP)
		m.tsc = rdtsc();

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		barrier();
		m.status = mce_rdmsrl(msr_ops.status(i));

		/* If this entry is not valid, ignore it */
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * If we are logging everything (at CPU online) or this
		 * is a corrected error, then we must log it.
		 */
		if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
			goto log_it;

		/*
		 * Newer Intel systems that support software error
		 * recovery need to make additional checks. Other
		 * CPUs should skip over uncorrected errors, but log
		 * everything else.
		 */
		if (!mca_cfg.ser) {
			if (m.status & MCI_STATUS_UC)
				continue;
			goto log_it;
		}

		/* Log "not enabled" (speculative) errors */
		if (!(m.status & MCI_STATUS_EN))
			goto log_it;

		/*
		 * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
		 * UC == 1 && PCC == 0 && S == 0
		 */
		if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
			goto log_it;

		/*
		 * Skip anything else. Presumption is that our read of this
		 * bank is racing with a machine check. Leave the log alone
		 * for do_machine_check() to deal with it.
		 */
		continue;

log_it:
		error_seen = true;

		if (flags & MCP_DONTLOG)
			goto clear_it;

		mce_read_aux(&m, i);
		m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false);
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */

		if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
			goto clear_it;

		mce_log(&m);

clear_it:
		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(msr_ops.status(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();

	return error_seen;
}
EXPORT_SYMBOL_GPL(machine_check_poll);
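/*
 * The log-or-skip decisions above, condensed (corrected errors are
 * always logged; the interesting cases are the uncorrected ones):
 *
 *	MCP_UC set, or status !UC	-> log
 *	UC, !mca_cfg.ser		-> skip, leave for do_machine_check()
 *	UC, ser, !EN			-> log ("not enabled"/speculative)
 *	UC, ser, !PCC && !S		-> log (UCNA)
 *	everything else			-> skip, likely racing with an #MC
 */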
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
			  struct pt_regs *regs)
{
	char *tmp = *msg;
	int i;

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		m->status = mce_rdmsrl(msr_ops.status(i));
		if (!(m->status & MCI_STATUS_VAL))
			continue;

		__set_bit(i, validp);
		if (quirk_no_way_out)
			quirk_no_way_out(i, m, regs);

		m->bank = i;
		if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
			mce_read_aux(m, i);
			*msg = tmp;
			return 1;
		}
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t, const char *msg)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_panicked))
		wait_for_panic();
	if (!mca_cfg.monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		if (mca_cfg.tolerant <= 1)
			mce_panic(msg, NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}
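/*
 * Spin-budget arithmetic for the rendezvous loops below (worked
 * example): mca_cfg.monarch_timeout is in microseconds and is converted
 * with NSEC_PER_USEC, then burned down in SPINUNIT (100ns) steps.  A
 * monarch_timeout of 1000 (1ms) therefore allows roughly 10,000
 * ndelay(SPINUNIT) iterations before mce_timed_out() gives up.
 */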
/*
 * The Monarch's reign. The Monarch is the CPU that entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		struct mce *mtmp = &per_cpu(mces_seen, cpu);

		if (mtmp->severity > global_worst) {
			global_worst = mtmp->severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
		/* call mce_severity() to get "msg" for panic */
		mce_severity(m, NULL, mca_cfg.tolerant, &msg, true);
		mce_panic("Fatal machine check", m, msg);
	}

	/*
	 * For UC somewhere we let the CPU that detects it handle it.
	 * We also must let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
		mce_panic("Fatal machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}
static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * Rely on the implied barrier below, such that global_nwo
	 * is updated before mce_callin.
	 */
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout,
				  "Timeout: Not all CPUs entered broadcast exception handler")) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout,
					  "Timeout: Subject CPUs unable to finish machine check processing")) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}
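/*
 * Rendezvous walk-through (4 CPUs, no timeouts, illustrative):
 * atomic_inc_return(&mce_callin) hands out order 1..4 and everyone
 * spins until mce_callin == num_online_cpus().  Order 1 becomes Monarch
 * and sets mce_executing to 1, which releases itself to scan; each
 * Subject keeps spinning until mce_executing reaches its own order
 * (incremented by the previous CPU in mce_end() below), so the banks
 * are scanned strictly in callin order: 1, then 2, 3, 4.
 */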
/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU unable to finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout,
					  "Timeout: Monarch CPU did not finish machine check processing"))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(msr_ops.status(i), 0);
	}
}

/*
 * Cases where we avoid rendezvous handler timeout:
 * 1) If this CPU is offline.
 *
 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
 *  skip those CPUs which remain looping in the 1st kernel - see
 *  crash_nmi_callback().
 *
 * Note: there still is a small window between kexec-ing and the new,
 * kdump kernel establishing a new #MC handler where a broadcasted MCE
 * might not get handled properly.
 */
static noinstr bool mce_check_crashing_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	if (arch_cpu_is_offline(cpu) ||
	    (crashing_cpu != -1 && crashing_cpu != cpu)) {
		u64 mcgstatus;

		mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);

		if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
			if (mcgstatus & MCG_STATUS_LMCES)
				return false;
		}

		if (mcgstatus & MCG_STATUS_RIPV) {
			__wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
			return true;
		}
	}
	return false;
}
valid_banks)) 118121afaf18SBorislav Petkov continue; 118221afaf18SBorislav Petkov 118321afaf18SBorislav Petkov if (!mce_banks[i].ctl) 118421afaf18SBorislav Petkov continue; 118521afaf18SBorislav Petkov 118621afaf18SBorislav Petkov m->misc = 0; 118721afaf18SBorislav Petkov m->addr = 0; 118821afaf18SBorislav Petkov m->bank = i; 118921afaf18SBorislav Petkov 119021afaf18SBorislav Petkov m->status = mce_rdmsrl(msr_ops.status(i)); 119121afaf18SBorislav Petkov if (!(m->status & MCI_STATUS_VAL)) 119221afaf18SBorislav Petkov continue; 119321afaf18SBorislav Petkov 119421afaf18SBorislav Petkov /* 119521afaf18SBorislav Petkov * Corrected or non-signaled errors are handled by 119621afaf18SBorislav Petkov * machine_check_poll(). Leave them alone, unless this panics. 119721afaf18SBorislav Petkov */ 119821afaf18SBorislav Petkov if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && 119921afaf18SBorislav Petkov !no_way_out) 120021afaf18SBorislav Petkov continue; 120121afaf18SBorislav Petkov 120221afaf18SBorislav Petkov /* Set taint even when machine check was not enabled. */ 120321afaf18SBorislav Petkov add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 120421afaf18SBorislav Petkov 120541ce0564SYouquan Song severity = mce_severity(m, regs, cfg->tolerant, NULL, true); 120621afaf18SBorislav Petkov 120721afaf18SBorislav Petkov /* 120821afaf18SBorislav Petkov * When machine check was for corrected/deferred handler don't 120921afaf18SBorislav Petkov * touch, unless we're panicking. 121021afaf18SBorislav Petkov */ 121121afaf18SBorislav Petkov if ((severity == MCE_KEEP_SEVERITY || 121221afaf18SBorislav Petkov severity == MCE_UCNA_SEVERITY) && !no_way_out) 121321afaf18SBorislav Petkov continue; 121421afaf18SBorislav Petkov 121521afaf18SBorislav Petkov __set_bit(i, toclear); 121621afaf18SBorislav Petkov 121721afaf18SBorislav Petkov /* Machine check event was not enabled. Clear, but ignore. 
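 * (The bank was already flagged in toclear above, so mce_clear_state()
 * will still wipe its STATUS MSR; only the logging is skipped.)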
*/
121821afaf18SBorislav Petkov if (severity == MCE_NO_SEVERITY)
121921afaf18SBorislav Petkov continue;
122021afaf18SBorislav Petkov
122121afaf18SBorislav Petkov mce_read_aux(m, i);
122221afaf18SBorislav Petkov
122321afaf18SBorislav Petkov /* assuming valid severity level != 0 */
122421afaf18SBorislav Petkov m->severity = severity;
122521afaf18SBorislav Petkov
122621afaf18SBorislav Petkov mce_log(m);
122721afaf18SBorislav Petkov
122821afaf18SBorislav Petkov if (severity > *worst) {
122921afaf18SBorislav Petkov *final = *m;
123021afaf18SBorislav Petkov *worst = severity;
123121afaf18SBorislav Petkov }
123221afaf18SBorislav Petkov }
123321afaf18SBorislav Petkov
123421afaf18SBorislav Petkov /* mce_clear_state will clear *final, save locally for use later */
123521afaf18SBorislav Petkov *m = *final;
123621afaf18SBorislav Petkov }
123721afaf18SBorislav Petkov
12385567d11cSPeter Zijlstra static void kill_me_now(struct callback_head *ch)
12395567d11cSPeter Zijlstra {
12405567d11cSPeter Zijlstra force_sig(SIGBUS);
12415567d11cSPeter Zijlstra }
12425567d11cSPeter Zijlstra
12435567d11cSPeter Zijlstra static void kill_me_maybe(struct callback_head *cb)
12445567d11cSPeter Zijlstra {
12455567d11cSPeter Zijlstra struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
12465567d11cSPeter Zijlstra int flags = MF_ACTION_REQUIRED;
12475567d11cSPeter Zijlstra
12485567d11cSPeter Zijlstra pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
124917fae129STony Luck
125017fae129STony Luck if (!p->mce_ripv)
12515567d11cSPeter Zijlstra flags |= MF_MUST_KILL;
12525567d11cSPeter Zijlstra
125330063810STony Luck if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
125430063810STony Luck !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
125517fae129STony Luck set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
12561e36d9c6STony Luck sync_core();
12575567d11cSPeter Zijlstra return;
12585567d11cSPeter Zijlstra }
12595567d11cSPeter Zijlstra
126030063810STony Luck if (p->mce_vaddr != (void __user *)-1l) {
126130063810STony Luck force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
126230063810STony Luck } else {
12635567d11cSPeter Zijlstra pr_err("Memory error not recovered");
12645567d11cSPeter Zijlstra kill_me_now(cb);
12655567d11cSPeter Zijlstra }
126630063810STony Luck }
12675567d11cSPeter Zijlstra
1268c0ab7ffcSTony Luck static void queue_task_work(struct mce *m, int kill_it)
1269c0ab7ffcSTony Luck {
1270c0ab7ffcSTony Luck current->mce_addr = m->addr;
1271c0ab7ffcSTony Luck current->mce_kflags = m->kflags;
1272c0ab7ffcSTony Luck current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
1273c0ab7ffcSTony Luck current->mce_whole_page = whole_page(m);
1274c0ab7ffcSTony Luck
1275c0ab7ffcSTony Luck if (kill_it)
1276c0ab7ffcSTony Luck current->mce_kill_me.func = kill_me_now;
1277c0ab7ffcSTony Luck else
1278c0ab7ffcSTony Luck current->mce_kill_me.func = kill_me_maybe;
1279c0ab7ffcSTony Luck
128091989c70SJens Axboe task_work_add(current, &current->mce_kill_me, TWA_RESUME);
1281c0ab7ffcSTony Luck }
128221afaf18SBorislav Petkov
128321afaf18SBorislav Petkov /*
128421afaf18SBorislav Petkov * The actual machine check handler. This only handles real
128521afaf18SBorislav Petkov * exceptions when something got corrupted coming in through int 18.
128621afaf18SBorislav Petkov *
128721afaf18SBorislav Petkov * This is executed in NMI context not subject to normal locking rules. This
128821afaf18SBorislav Petkov * implies that most kernel services cannot be safely used. Don't even
128921afaf18SBorislav Petkov * think about putting a printk in there!
129021afaf18SBorislav Petkov *
129121afaf18SBorislav Petkov * On Intel systems this is entered on all CPUs in parallel through
129221afaf18SBorislav Petkov * MCE broadcast. However some CPUs might be broken beyond repair,
129321afaf18SBorislav Petkov * so always be careful when synchronizing with others.
129455ba18d6SAndy Lutomirski *
129555ba18d6SAndy Lutomirski * Tracing and kprobes are disabled: if we interrupted a kernel context
129655ba18d6SAndy Lutomirski * with IF=1, we need to minimize stack usage. There are also recursion
129755ba18d6SAndy Lutomirski * issues: if the machine check was due to a failure of the memory
129855ba18d6SAndy Lutomirski * backing the user stack, tracing that reads the user stack will cause
129955ba18d6SAndy Lutomirski * potentially infinite recursion.
130021afaf18SBorislav Petkov */
13017f6fa101SIra Weiny noinstr void do_machine_check(struct pt_regs *regs)
130221afaf18SBorislav Petkov {
130321afaf18SBorislav Petkov DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
130421afaf18SBorislav Petkov DECLARE_BITMAP(toclear, MAX_NR_BANKS);
130521afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg;
130621afaf18SBorislav Petkov struct mce m, *final;
13077a8bc2b0SJan H. Schönherr char *msg = NULL;
130821afaf18SBorislav Petkov int worst = 0;
130921afaf18SBorislav Petkov
131021afaf18SBorislav Petkov /*
131121afaf18SBorislav Petkov * Establish sequential order between the CPUs entering the machine
131221afaf18SBorislav Petkov * check handler.
131321afaf18SBorislav Petkov */
131421afaf18SBorislav Petkov int order = -1;
131521afaf18SBorislav Petkov
131621afaf18SBorislav Petkov /*
131721afaf18SBorislav Petkov * If no_way_out gets set, there is no safe way to recover from this
131821afaf18SBorislav Petkov * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
131921afaf18SBorislav Petkov */
132021afaf18SBorislav Petkov int no_way_out = 0;
132121afaf18SBorislav Petkov
132221afaf18SBorislav Petkov /*
132321afaf18SBorislav Petkov * If kill_it gets set, there might be a way to recover from this
132421afaf18SBorislav Petkov * error.
132521afaf18SBorislav Petkov */
132621afaf18SBorislav Petkov int kill_it = 0;
132721afaf18SBorislav Petkov
132821afaf18SBorislav Petkov /*
132921afaf18SBorislav Petkov * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
133021afaf18SBorislav Petkov * on Intel.
133121afaf18SBorislav Petkov */
133221afaf18SBorislav Petkov int lmce = 1;
133321afaf18SBorislav Petkov
133421afaf18SBorislav Petkov this_cpu_inc(mce_exception_count);
133521afaf18SBorislav Petkov
133621afaf18SBorislav Petkov mce_gather_info(&m, regs);
133721afaf18SBorislav Petkov m.tsc = rdtsc();
133821afaf18SBorislav Petkov
133921afaf18SBorislav Petkov final = this_cpu_ptr(&mces_seen);
134021afaf18SBorislav Petkov *final = m;
134121afaf18SBorislav Petkov
134221afaf18SBorislav Petkov memset(valid_banks, 0, sizeof(valid_banks));
134321afaf18SBorislav Petkov no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
134421afaf18SBorislav Petkov
134521afaf18SBorislav Petkov barrier();
134621afaf18SBorislav Petkov
134721afaf18SBorislav Petkov /*
134821afaf18SBorislav Petkov * When there is no restart IP we might need to kill the task or panic.
134921afaf18SBorislav Petkov * Assume the worst for now, but if we find the
135021afaf18SBorislav Petkov * severity is MCE_AR_SEVERITY we have other options.
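 * (MCG_STATUS_RIPV set means the saved return RIP is valid and the
 * interrupted context can be restarted; when it is clear we must not
 * simply return to it, which is what forces kill_it below.)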
135121afaf18SBorislav Petkov */ 135221afaf18SBorislav Petkov if (!(m.mcgstatus & MCG_STATUS_RIPV)) 135321afaf18SBorislav Petkov kill_it = 1; 135421afaf18SBorislav Petkov 135521afaf18SBorislav Petkov /* 135621afaf18SBorislav Petkov * Check if this MCE is signaled to only this logical processor, 135770f0c230STony W Wang-oc * on Intel, Zhaoxin only. 135821afaf18SBorislav Petkov */ 135970f0c230STony W Wang-oc if (m.cpuvendor == X86_VENDOR_INTEL || 136070f0c230STony W Wang-oc m.cpuvendor == X86_VENDOR_ZHAOXIN) 136121afaf18SBorislav Petkov lmce = m.mcgstatus & MCG_STATUS_LMCES; 136221afaf18SBorislav Petkov 136321afaf18SBorislav Petkov /* 136421afaf18SBorislav Petkov * Local machine check may already know that we have to panic. 136521afaf18SBorislav Petkov * Broadcast machine check begins rendezvous in mce_start() 136621afaf18SBorislav Petkov * Go through all banks in exclusion of the other CPUs. This way we 136721afaf18SBorislav Petkov * don't report duplicated events on shared banks because the first one 136821afaf18SBorislav Petkov * to see it will clear it. 136921afaf18SBorislav Petkov */ 137021afaf18SBorislav Petkov if (lmce) { 137121afaf18SBorislav Petkov if (no_way_out) 137221afaf18SBorislav Petkov mce_panic("Fatal local machine check", &m, msg); 137321afaf18SBorislav Petkov } else { 137421afaf18SBorislav Petkov order = mce_start(&no_way_out); 137521afaf18SBorislav Petkov } 137621afaf18SBorislav Petkov 137741ce0564SYouquan Song __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst); 137821afaf18SBorislav Petkov 137921afaf18SBorislav Petkov if (!no_way_out) 138021afaf18SBorislav Petkov mce_clear_state(toclear); 138121afaf18SBorislav Petkov 138221afaf18SBorislav Petkov /* 138321afaf18SBorislav Petkov * Do most of the synchronization with other CPUs. 138421afaf18SBorislav Petkov * When there's any problem use only local no_way_out state. 138521afaf18SBorislav Petkov */ 138621afaf18SBorislav Petkov if (!lmce) { 138721afaf18SBorislav Petkov if (mce_end(order) < 0) 138821afaf18SBorislav Petkov no_way_out = worst >= MCE_PANIC_SEVERITY; 138921afaf18SBorislav Petkov } else { 139021afaf18SBorislav Petkov /* 139121afaf18SBorislav Petkov * If there was a fatal machine check we should have 139221afaf18SBorislav Petkov * already called mce_panic earlier in this function. 139321afaf18SBorislav Petkov * Since we re-read the banks, we might have found 139421afaf18SBorislav Petkov * something new. Check again to see if we found a 139521afaf18SBorislav Petkov * fatal error. We call "mce_severity()" again to 139621afaf18SBorislav Petkov * make sure we have the right "msg". 139721afaf18SBorislav Petkov */ 139821afaf18SBorislav Petkov if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { 139941ce0564SYouquan Song mce_severity(&m, regs, cfg->tolerant, &msg, true); 140021afaf18SBorislav Petkov mce_panic("Local fatal machine check!", &m, msg); 140121afaf18SBorislav Petkov } 140221afaf18SBorislav Petkov } 140321afaf18SBorislav Petkov 140421afaf18SBorislav Petkov /* 140521afaf18SBorislav Petkov * If tolerant is at an insane level we drop requests to kill 140621afaf18SBorislav Petkov * processes and continue even when there is no way out. 
140721afaf18SBorislav Petkov */ 140821afaf18SBorislav Petkov if (cfg->tolerant == 3) 140921afaf18SBorislav Petkov kill_it = 0; 141021afaf18SBorislav Petkov else if (no_way_out) 141121afaf18SBorislav Petkov mce_panic("Fatal machine check on current CPU", &m, msg); 141221afaf18SBorislav Petkov 141321afaf18SBorislav Petkov if (worst > 0) 141439f0584eSBorislav Petkov irq_work_queue(&mce_irq_work); 141539f0584eSBorislav Petkov 141621afaf18SBorislav Petkov if (worst != MCE_AR_SEVERITY && !kill_it) 14171e36d9c6STony Luck goto out; 141821afaf18SBorislav Petkov 141921afaf18SBorislav Petkov /* Fault was in user mode and we need to take some action */ 142021afaf18SBorislav Petkov if ((m.cs & 3) == 3) { 1421b052df3dSThomas Gleixner /* If this triggers there is no way to recover. Die hard. */ 1422b052df3dSThomas Gleixner BUG_ON(!on_thread_stack() || !user_mode(regs)); 142321afaf18SBorislav Petkov 1424c0ab7ffcSTony Luck queue_task_work(&m, kill_it); 1425c0ab7ffcSTony Luck 142621afaf18SBorislav Petkov } else { 14271df73b21SBorislav Petkov /* 14281df73b21SBorislav Petkov * Handle an MCE which has happened in kernel space but from 14291df73b21SBorislav Petkov * which the kernel can recover: ex_has_fault_handler() has 14301df73b21SBorislav Petkov * already verified that the rIP at which the error happened is 14311df73b21SBorislav Petkov * a rIP from which the kernel can recover (by jumping to 14321df73b21SBorislav Petkov * recovery code specified in _ASM_EXTABLE_FAULT()) and the 14331df73b21SBorislav Petkov * corresponding exception handler which would do that is the 14341df73b21SBorislav Petkov * proper one. 14351df73b21SBorislav Petkov */ 14361df73b21SBorislav Petkov if (m.kflags & MCE_IN_KERNEL_RECOV) { 14378cd501c1SThomas Gleixner if (!fixup_exception(regs, X86_TRAP_MC, 0, 0)) 14382d806d07SJan H. Schönherr mce_panic("Failed kernel mode recovery", &m, msg); 143921afaf18SBorislav Petkov } 1440c0ab7ffcSTony Luck 1441c0ab7ffcSTony Luck if (m.kflags & MCE_IN_KERNEL_COPYIN) 1442c0ab7ffcSTony Luck queue_task_work(&m, kill_it); 14431df73b21SBorislav Petkov } 14441e36d9c6STony Luck out: 14451e36d9c6STony Luck mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 144621afaf18SBorislav Petkov } 144721afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(do_machine_check); 144821afaf18SBorislav Petkov 144921afaf18SBorislav Petkov #ifndef CONFIG_MEMORY_FAILURE 145021afaf18SBorislav Petkov int memory_failure(unsigned long pfn, int flags) 145121afaf18SBorislav Petkov { 145221afaf18SBorislav Petkov /* mce_severity() should not hand us an ACTION_REQUIRED error */ 145321afaf18SBorislav Petkov BUG_ON(flags & MF_ACTION_REQUIRED); 145421afaf18SBorislav Petkov pr_err("Uncorrected memory error in page 0x%lx ignored\n" 145521afaf18SBorislav Petkov "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", 145621afaf18SBorislav Petkov pfn); 145721afaf18SBorislav Petkov 145821afaf18SBorislav Petkov return 0; 145921afaf18SBorislav Petkov } 146021afaf18SBorislav Petkov #endif 146121afaf18SBorislav Petkov 146221afaf18SBorislav Petkov /* 146321afaf18SBorislav Petkov * Periodic polling timer for "silent" machine check errors. If the 146421afaf18SBorislav Petkov * poller finds an MCE, poll 2x faster. When the poller finds no more 146521afaf18SBorislav Petkov * errors, poll 2x slower (up to check_interval seconds). 
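 * An illustrative sketch of the resulting back-off, assuming HZ=1000
 * and the default 5-minute check_interval (both values are assumptions
 * of this example, not requirements):
 *
 *   MCE logged: iv = max(iv / 2, HZ / 100);            300s -> 150s -> ... -> 10ms
 *   quiet:      iv = min(iv * 2, check_interval * HZ); 10ms -> 20ms -> ... -> 300s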
146621afaf18SBorislav Petkov */ 146721afaf18SBorislav Petkov static unsigned long check_interval = INITIAL_CHECK_INTERVAL; 146821afaf18SBorislav Petkov 146921afaf18SBorislav Petkov static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ 147021afaf18SBorislav Petkov static DEFINE_PER_CPU(struct timer_list, mce_timer); 147121afaf18SBorislav Petkov 147221afaf18SBorislav Petkov static unsigned long mce_adjust_timer_default(unsigned long interval) 147321afaf18SBorislav Petkov { 147421afaf18SBorislav Petkov return interval; 147521afaf18SBorislav Petkov } 147621afaf18SBorislav Petkov 147721afaf18SBorislav Petkov static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 147821afaf18SBorislav Petkov 147921afaf18SBorislav Petkov static void __start_timer(struct timer_list *t, unsigned long interval) 148021afaf18SBorislav Petkov { 148121afaf18SBorislav Petkov unsigned long when = jiffies + interval; 148221afaf18SBorislav Petkov unsigned long flags; 148321afaf18SBorislav Petkov 148421afaf18SBorislav Petkov local_irq_save(flags); 148521afaf18SBorislav Petkov 148621afaf18SBorislav Petkov if (!timer_pending(t) || time_before(when, t->expires)) 148721afaf18SBorislav Petkov mod_timer(t, round_jiffies(when)); 148821afaf18SBorislav Petkov 148921afaf18SBorislav Petkov local_irq_restore(flags); 149021afaf18SBorislav Petkov } 149121afaf18SBorislav Petkov 149221afaf18SBorislav Petkov static void mce_timer_fn(struct timer_list *t) 149321afaf18SBorislav Petkov { 149421afaf18SBorislav Petkov struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); 149521afaf18SBorislav Petkov unsigned long iv; 149621afaf18SBorislav Petkov 149721afaf18SBorislav Petkov WARN_ON(cpu_t != t); 149821afaf18SBorislav Petkov 149921afaf18SBorislav Petkov iv = __this_cpu_read(mce_next_interval); 150021afaf18SBorislav Petkov 150121afaf18SBorislav Petkov if (mce_available(this_cpu_ptr(&cpu_info))) { 150221afaf18SBorislav Petkov machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); 150321afaf18SBorislav Petkov 150421afaf18SBorislav Petkov if (mce_intel_cmci_poll()) { 150521afaf18SBorislav Petkov iv = mce_adjust_timer(iv); 150621afaf18SBorislav Petkov goto done; 150721afaf18SBorislav Petkov } 150821afaf18SBorislav Petkov } 150921afaf18SBorislav Petkov 151021afaf18SBorislav Petkov /* 151121afaf18SBorislav Petkov * Alert userspace if needed. If we logged an MCE, reduce the polling 151221afaf18SBorislav Petkov * interval, otherwise increase the polling interval. 151321afaf18SBorislav Petkov */ 151421afaf18SBorislav Petkov if (mce_notify_irq()) 151521afaf18SBorislav Petkov iv = max(iv / 2, (unsigned long) HZ/100); 151621afaf18SBorislav Petkov else 151721afaf18SBorislav Petkov iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); 151821afaf18SBorislav Petkov 151921afaf18SBorislav Petkov done: 152021afaf18SBorislav Petkov __this_cpu_write(mce_next_interval, iv); 152121afaf18SBorislav Petkov __start_timer(t, iv); 152221afaf18SBorislav Petkov } 152321afaf18SBorislav Petkov 152421afaf18SBorislav Petkov /* 152521afaf18SBorislav Petkov * Ensure that the timer is firing in @interval from now. 
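 * (Used, for example, by the Intel CMCI storm handling to switch a CPU
 * to fast polling for a while. The shorter interval only takes effect
 * if it fires sooner than what is already programmed - see the
 * time_before() check in __start_timer().)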
152621afaf18SBorislav Petkov */ 152721afaf18SBorislav Petkov void mce_timer_kick(unsigned long interval) 152821afaf18SBorislav Petkov { 152921afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 153021afaf18SBorislav Petkov unsigned long iv = __this_cpu_read(mce_next_interval); 153121afaf18SBorislav Petkov 153221afaf18SBorislav Petkov __start_timer(t, interval); 153321afaf18SBorislav Petkov 153421afaf18SBorislav Petkov if (interval < iv) 153521afaf18SBorislav Petkov __this_cpu_write(mce_next_interval, interval); 153621afaf18SBorislav Petkov } 153721afaf18SBorislav Petkov 153821afaf18SBorislav Petkov /* Must not be called in IRQ context where del_timer_sync() can deadlock */ 153921afaf18SBorislav Petkov static void mce_timer_delete_all(void) 154021afaf18SBorislav Petkov { 154121afaf18SBorislav Petkov int cpu; 154221afaf18SBorislav Petkov 154321afaf18SBorislav Petkov for_each_online_cpu(cpu) 154421afaf18SBorislav Petkov del_timer_sync(&per_cpu(mce_timer, cpu)); 154521afaf18SBorislav Petkov } 154621afaf18SBorislav Petkov 154721afaf18SBorislav Petkov /* 154821afaf18SBorislav Petkov * Notify the user(s) about new machine check events. 154921afaf18SBorislav Petkov * Can be called from interrupt context, but not from machine check/NMI 155021afaf18SBorislav Petkov * context. 155121afaf18SBorislav Petkov */ 155221afaf18SBorislav Petkov int mce_notify_irq(void) 155321afaf18SBorislav Petkov { 155421afaf18SBorislav Petkov /* Not more than two messages every minute */ 155521afaf18SBorislav Petkov static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); 155621afaf18SBorislav Petkov 155721afaf18SBorislav Petkov if (test_and_clear_bit(0, &mce_need_notify)) { 155821afaf18SBorislav Petkov mce_work_trigger(); 155921afaf18SBorislav Petkov 156021afaf18SBorislav Petkov if (__ratelimit(&ratelimit)) 156121afaf18SBorislav Petkov pr_info(HW_ERR "Machine check events logged\n"); 156221afaf18SBorislav Petkov 156321afaf18SBorislav Petkov return 1; 156421afaf18SBorislav Petkov } 156521afaf18SBorislav Petkov return 0; 156621afaf18SBorislav Petkov } 156721afaf18SBorislav Petkov EXPORT_SYMBOL_GPL(mce_notify_irq); 156821afaf18SBorislav Petkov 1569b4914508SYazen Ghannam static void __mcheck_cpu_mce_banks_init(void) 157021afaf18SBorislav Petkov { 1571b4914508SYazen Ghannam struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1572c7d314f3SYazen Ghannam u8 n_banks = this_cpu_read(mce_num_banks); 157321afaf18SBorislav Petkov int i; 157421afaf18SBorislav Petkov 1575c7d314f3SYazen Ghannam for (i = 0; i < n_banks; i++) { 157621afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 157721afaf18SBorislav Petkov 1578068b053dSYazen Ghannam /* 1579068b053dSYazen Ghannam * Init them all, __mcheck_cpu_apply_quirks() is going to apply 1580068b053dSYazen Ghannam * the required vendor quirks before 1581068b053dSYazen Ghannam * __mcheck_cpu_init_clear_banks() does the final bank setup. 1582068b053dSYazen Ghannam */ 158321afaf18SBorislav Petkov b->ctl = -1ULL; 158421afaf18SBorislav Petkov b->init = 1; 158521afaf18SBorislav Petkov } 158621afaf18SBorislav Petkov } 158721afaf18SBorislav Petkov 158821afaf18SBorislav Petkov /* 158921afaf18SBorislav Petkov * Initialize Machine Checks for a CPU. 
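 * A hypothetical MCG_CAP decode, purely for illustration: a value of
 * 0x1000016 would mean 0x16 (22) banks in bits 7:0 and software error
 * recovery support (MCG_SER_P, bit 24).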
159021afaf18SBorislav Petkov */ 1591b4914508SYazen Ghannam static void __mcheck_cpu_cap_init(void) 159221afaf18SBorislav Petkov { 159321afaf18SBorislav Petkov u64 cap; 1594006c0770SYazen Ghannam u8 b; 159521afaf18SBorislav Petkov 159621afaf18SBorislav Petkov rdmsrl(MSR_IA32_MCG_CAP, cap); 159721afaf18SBorislav Petkov 159821afaf18SBorislav Petkov b = cap & MCG_BANKCNT_MASK; 159921afaf18SBorislav Petkov 1600c7d314f3SYazen Ghannam if (b > MAX_NR_BANKS) { 1601c7d314f3SYazen Ghannam pr_warn("CPU%d: Using only %u machine check banks out of %u\n", 1602c7d314f3SYazen Ghannam smp_processor_id(), MAX_NR_BANKS, b); 1603c7d314f3SYazen Ghannam b = MAX_NR_BANKS; 1604c7d314f3SYazen Ghannam } 1605c7d314f3SYazen Ghannam 1606c7d314f3SYazen Ghannam this_cpu_write(mce_num_banks, b); 160721afaf18SBorislav Petkov 1608b4914508SYazen Ghannam __mcheck_cpu_mce_banks_init(); 160921afaf18SBorislav Petkov 161021afaf18SBorislav Petkov /* Use accurate RIP reporting if available. */ 161121afaf18SBorislav Petkov if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) 161221afaf18SBorislav Petkov mca_cfg.rip_msr = MSR_IA32_MCG_EIP; 161321afaf18SBorislav Petkov 161421afaf18SBorislav Petkov if (cap & MCG_SER_P) 161521afaf18SBorislav Petkov mca_cfg.ser = 1; 161621afaf18SBorislav Petkov } 161721afaf18SBorislav Petkov 161821afaf18SBorislav Petkov static void __mcheck_cpu_init_generic(void) 161921afaf18SBorislav Petkov { 162021afaf18SBorislav Petkov enum mcp_flags m_fl = 0; 162121afaf18SBorislav Petkov mce_banks_t all_banks; 162221afaf18SBorislav Petkov u64 cap; 162321afaf18SBorislav Petkov 162421afaf18SBorislav Petkov if (!mca_cfg.bootlog) 162521afaf18SBorislav Petkov m_fl = MCP_DONTLOG; 162621afaf18SBorislav Petkov 162721afaf18SBorislav Petkov /* 162821afaf18SBorislav Petkov * Log the machine checks left over from the previous reset. 162921afaf18SBorislav Petkov */ 163021afaf18SBorislav Petkov bitmap_fill(all_banks, MAX_NR_BANKS); 163121afaf18SBorislav Petkov machine_check_poll(MCP_UC | m_fl, &all_banks); 163221afaf18SBorislav Petkov 163321afaf18SBorislav Petkov cr4_set_bits(X86_CR4_MCE); 163421afaf18SBorislav Petkov 163521afaf18SBorislav Petkov rdmsrl(MSR_IA32_MCG_CAP, cap); 163621afaf18SBorislav Petkov if (cap & MCG_CTL_P) 163721afaf18SBorislav Petkov wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 163821afaf18SBorislav Petkov } 163921afaf18SBorislav Petkov 164021afaf18SBorislav Petkov static void __mcheck_cpu_init_clear_banks(void) 164121afaf18SBorislav Petkov { 1642b4914508SYazen Ghannam struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 164321afaf18SBorislav Petkov int i; 164421afaf18SBorislav Petkov 1645c7d314f3SYazen Ghannam for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 164621afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 164721afaf18SBorislav Petkov 164821afaf18SBorislav Petkov if (!b->init) 164921afaf18SBorislav Petkov continue; 165021afaf18SBorislav Petkov wrmsrl(msr_ops.ctl(i), b->ctl); 165121afaf18SBorislav Petkov wrmsrl(msr_ops.status(i), 0); 165221afaf18SBorislav Petkov } 165321afaf18SBorislav Petkov } 165421afaf18SBorislav Petkov 165521afaf18SBorislav Petkov /* 1656068b053dSYazen Ghannam * Do a final check to see if there are any unused/RAZ banks. 1657068b053dSYazen Ghannam * 1658068b053dSYazen Ghannam * This must be done after the banks have been initialized and any quirks have 1659068b053dSYazen Ghannam * been applied. 1660068b053dSYazen Ghannam * 1661068b053dSYazen Ghannam * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs. 
1662068b053dSYazen Ghannam * Otherwise, a user who disables a bank will not be able to re-enable it 1663068b053dSYazen Ghannam * without a system reboot. 1664068b053dSYazen Ghannam */ 1665068b053dSYazen Ghannam static void __mcheck_cpu_check_banks(void) 1666068b053dSYazen Ghannam { 1667068b053dSYazen Ghannam struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1668068b053dSYazen Ghannam u64 msrval; 1669068b053dSYazen Ghannam int i; 1670068b053dSYazen Ghannam 1671068b053dSYazen Ghannam for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1672068b053dSYazen Ghannam struct mce_bank *b = &mce_banks[i]; 1673068b053dSYazen Ghannam 1674068b053dSYazen Ghannam if (!b->init) 1675068b053dSYazen Ghannam continue; 1676068b053dSYazen Ghannam 1677068b053dSYazen Ghannam rdmsrl(msr_ops.ctl(i), msrval); 1678068b053dSYazen Ghannam b->init = !!msrval; 1679068b053dSYazen Ghannam } 1680068b053dSYazen Ghannam } 1681068b053dSYazen Ghannam 1682068b053dSYazen Ghannam /* 168321afaf18SBorislav Petkov * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and 168421afaf18SBorislav Petkov * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM 168521afaf18SBorislav Petkov * Vol 3B Table 15-20). But this confuses both the code that determines 168621afaf18SBorislav Petkov * whether the machine check occurred in kernel or user mode, and also 168721afaf18SBorislav Petkov * the severity assessment code. Pretend that EIPV was set, and take the 168821afaf18SBorislav Petkov * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. 168921afaf18SBorislav Petkov */ 169021afaf18SBorislav Petkov static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) 169121afaf18SBorislav Petkov { 169221afaf18SBorislav Petkov if (bank != 0) 169321afaf18SBorislav Petkov return; 169421afaf18SBorislav Petkov if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) 169521afaf18SBorislav Petkov return; 169621afaf18SBorislav Petkov if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| 169721afaf18SBorislav Petkov MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| 169821afaf18SBorislav Petkov MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| 169921afaf18SBorislav Petkov MCACOD)) != 170021afaf18SBorislav Petkov (MCI_STATUS_UC|MCI_STATUS_EN| 170121afaf18SBorislav Petkov MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| 170221afaf18SBorislav Petkov MCI_STATUS_AR|MCACOD_INSTR)) 170321afaf18SBorislav Petkov return; 170421afaf18SBorislav Petkov 170521afaf18SBorislav Petkov m->mcgstatus |= MCG_STATUS_EIPV; 170621afaf18SBorislav Petkov m->ip = regs->ip; 170721afaf18SBorislav Petkov m->cs = regs->cs; 170821afaf18SBorislav Petkov } 170921afaf18SBorislav Petkov 171021afaf18SBorislav Petkov /* Add per CPU specific workarounds here */ 171121afaf18SBorislav Petkov static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 171221afaf18SBorislav Petkov { 1713b4914508SYazen Ghannam struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 171421afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 171521afaf18SBorislav Petkov 171621afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 171721afaf18SBorislav Petkov pr_info("unknown CPU type - not enabling MCE support\n"); 171821afaf18SBorislav Petkov return -EOPNOTSUPP; 171921afaf18SBorislav Petkov } 172021afaf18SBorislav Petkov 172121afaf18SBorislav Petkov /* This should be disabled by the BIOS, but isn't always */ 172221afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_AMD) { 1723c7d314f3SYazen Ghannam if (c->x86 == 15 && 
this_cpu_read(mce_num_banks) > 4) { 172421afaf18SBorislav Petkov /* 172521afaf18SBorislav Petkov * disable GART TBL walk error reporting, which 172621afaf18SBorislav Petkov * trips off incorrectly with the IOMMU & 3ware 172721afaf18SBorislav Petkov * & Cerberus: 172821afaf18SBorislav Petkov */ 172921afaf18SBorislav Petkov clear_bit(10, (unsigned long *)&mce_banks[4].ctl); 173021afaf18SBorislav Petkov } 173121afaf18SBorislav Petkov if (c->x86 < 0x11 && cfg->bootlog < 0) { 173221afaf18SBorislav Petkov /* 173321afaf18SBorislav Petkov * Lots of broken BIOS around that don't clear them 173421afaf18SBorislav Petkov * by default and leave crap in there. Don't log: 173521afaf18SBorislav Petkov */ 173621afaf18SBorislav Petkov cfg->bootlog = 0; 173721afaf18SBorislav Petkov } 173821afaf18SBorislav Petkov /* 173921afaf18SBorislav Petkov * Various K7s with broken bank 0 around. Always disable 174021afaf18SBorislav Petkov * by default. 174121afaf18SBorislav Petkov */ 1742c7d314f3SYazen Ghannam if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0) 174321afaf18SBorislav Petkov mce_banks[0].ctl = 0; 174421afaf18SBorislav Petkov 174521afaf18SBorislav Petkov /* 174621afaf18SBorislav Petkov * overflow_recov is supported for F15h Models 00h-0fh 174721afaf18SBorislav Petkov * even though we don't have a CPUID bit for it. 174821afaf18SBorislav Petkov */ 174921afaf18SBorislav Petkov if (c->x86 == 0x15 && c->x86_model <= 0xf) 175021afaf18SBorislav Petkov mce_flags.overflow_recov = 1; 175121afaf18SBorislav Petkov 175221afaf18SBorislav Petkov } 175321afaf18SBorislav Petkov 175421afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_INTEL) { 175521afaf18SBorislav Petkov /* 175621afaf18SBorislav Petkov * SDM documents that on family 6 bank 0 should not be written 175721afaf18SBorislav Petkov * because it aliases to another special BIOS controlled 175821afaf18SBorislav Petkov * register. 175921afaf18SBorislav Petkov * But it's not aliased anymore on model 0x1a+ 176021afaf18SBorislav Petkov * Don't ignore bank 0 completely because there could be a 176121afaf18SBorislav Petkov * valid event later, merely don't write CTL0. 176221afaf18SBorislav Petkov */ 176321afaf18SBorislav Petkov 1764c7d314f3SYazen Ghannam if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0) 176521afaf18SBorislav Petkov mce_banks[0].init = 0; 176621afaf18SBorislav Petkov 176721afaf18SBorislav Petkov /* 176821afaf18SBorislav Petkov * All newer Intel systems support MCE broadcasting. Enable 176921afaf18SBorislav Petkov * synchronization with a one second timeout. 
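 * (monarch_timeout is kept in microseconds: mce_end() scales it by
 * NSEC_PER_USEC, hence USEC_PER_SEC here for the one second default.)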
177021afaf18SBorislav Petkov */
177121afaf18SBorislav Petkov if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
177221afaf18SBorislav Petkov cfg->monarch_timeout < 0)
177321afaf18SBorislav Petkov cfg->monarch_timeout = USEC_PER_SEC;
177421afaf18SBorislav Petkov
177521afaf18SBorislav Petkov /*
177621afaf18SBorislav Petkov * There are also broken BIOSes on some Pentium M and
177721afaf18SBorislav Petkov * earlier systems:
177821afaf18SBorislav Petkov */
177921afaf18SBorislav Petkov if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
178021afaf18SBorislav Petkov cfg->bootlog = 0;
178121afaf18SBorislav Petkov
178221afaf18SBorislav Petkov if (c->x86 == 6 && c->x86_model == 45)
178321afaf18SBorislav Petkov quirk_no_way_out = quirk_sandybridge_ifu;
178421afaf18SBorislav Petkov }
17856e898d2bSTony W Wang-oc
17866e898d2bSTony W Wang-oc if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
17876e898d2bSTony W Wang-oc /*
17886e898d2bSTony W Wang-oc * All newer Zhaoxin CPUs support MCE broadcasting. Enable
17896e898d2bSTony W Wang-oc * synchronization with a one second timeout.
17906e898d2bSTony W Wang-oc */
17916e898d2bSTony W Wang-oc if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
17926e898d2bSTony W Wang-oc if (cfg->monarch_timeout < 0)
17936e898d2bSTony W Wang-oc cfg->monarch_timeout = USEC_PER_SEC;
17946e898d2bSTony W Wang-oc }
17956e898d2bSTony W Wang-oc }
17966e898d2bSTony W Wang-oc
179721afaf18SBorislav Petkov if (cfg->monarch_timeout < 0)
179821afaf18SBorislav Petkov cfg->monarch_timeout = 0;
179921afaf18SBorislav Petkov if (cfg->bootlog != 0)
180021afaf18SBorislav Petkov cfg->panic_timeout = 30;
180121afaf18SBorislav Petkov
180221afaf18SBorislav Petkov return 0;
180321afaf18SBorislav Petkov }
180421afaf18SBorislav Petkov
180521afaf18SBorislav Petkov static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
180621afaf18SBorislav Petkov {
180721afaf18SBorislav Petkov if (c->x86 != 5)
180821afaf18SBorislav Petkov return 0;
180921afaf18SBorislav Petkov
181021afaf18SBorislav Petkov switch (c->x86_vendor) {
181121afaf18SBorislav Petkov case X86_VENDOR_INTEL:
181221afaf18SBorislav Petkov intel_p5_mcheck_init(c);
181321afaf18SBorislav Petkov return 1;
181521afaf18SBorislav Petkov case X86_VENDOR_CENTAUR:
181621afaf18SBorislav Petkov winchip_mcheck_init(c);
181721afaf18SBorislav Petkov return 1;
181921afaf18SBorislav Petkov default:
182021afaf18SBorislav Petkov return 0;
182121afaf18SBorislav Petkov }
182221afaf18SBorislav Petkov
182321afaf18SBorislav Petkov return 0;
182421afaf18SBorislav Petkov }
182521afaf18SBorislav Petkov
182621afaf18SBorislav Petkov /*
182721afaf18SBorislav Petkov * Init basic CPU features needed for early decoding of MCEs.
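 * (On AMD/Hygon this also selects the register layout: when
 * X86_FEATURE_SMCA is present, msr_ops below is switched from the
 * legacy MCi_{CTL,STATUS,ADDR,MISC} addresses to the SMCA ones.)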
182821afaf18SBorislav Petkov */ 182921afaf18SBorislav Petkov static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) 183021afaf18SBorislav Petkov { 183121afaf18SBorislav Petkov if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { 183221afaf18SBorislav Petkov mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); 183321afaf18SBorislav Petkov mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); 183421afaf18SBorislav Petkov mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); 1835c9bf318fSThomas Gleixner mce_flags.amd_threshold = 1; 183621afaf18SBorislav Petkov 183721afaf18SBorislav Petkov if (mce_flags.smca) { 183821afaf18SBorislav Petkov msr_ops.ctl = smca_ctl_reg; 183921afaf18SBorislav Petkov msr_ops.status = smca_status_reg; 184021afaf18SBorislav Petkov msr_ops.addr = smca_addr_reg; 184121afaf18SBorislav Petkov msr_ops.misc = smca_misc_reg; 184221afaf18SBorislav Petkov } 184321afaf18SBorislav Petkov } 184421afaf18SBorislav Petkov } 184521afaf18SBorislav Petkov 184621afaf18SBorislav Petkov static void mce_centaur_feature_init(struct cpuinfo_x86 *c) 184721afaf18SBorislav Petkov { 184821afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 184921afaf18SBorislav Petkov 185021afaf18SBorislav Petkov /* 185121afaf18SBorislav Petkov * All newer Centaur CPUs support MCE broadcasting. Enable 185221afaf18SBorislav Petkov * synchronization with a one second timeout. 185321afaf18SBorislav Petkov */ 185421afaf18SBorislav Petkov if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || 185521afaf18SBorislav Petkov c->x86 > 6) { 185621afaf18SBorislav Petkov if (cfg->monarch_timeout < 0) 185721afaf18SBorislav Petkov cfg->monarch_timeout = USEC_PER_SEC; 185821afaf18SBorislav Petkov } 185921afaf18SBorislav Petkov } 186021afaf18SBorislav Petkov 18615a3d56a0STony W Wang-oc static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) 18625a3d56a0STony W Wang-oc { 18635a3d56a0STony W Wang-oc struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 18645a3d56a0STony W Wang-oc 18655a3d56a0STony W Wang-oc /* 18665a3d56a0STony W Wang-oc * These CPUs have MCA bank 8 which reports only one error type called 18675a3d56a0STony W Wang-oc * SVAD (System View Address Decoder). The reporting of that error is 18685a3d56a0STony W Wang-oc * controlled by IA32_MC8.CTL.0. 18695a3d56a0STony W Wang-oc * 18705a3d56a0STony W Wang-oc * If enabled, prefetching on these CPUs will cause SVAD MCE when 18715a3d56a0STony W Wang-oc * virtual machines start and result in a system panic. Always disable 18725a3d56a0STony W Wang-oc * bank 8 SVAD error by default. 
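 * (Zeroing mce_banks[8].ctl below keeps bank 8 fully masked when
 * __mcheck_cpu_init_clear_banks() later writes the per-bank CTL MSRs.)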
18735a3d56a0STony W Wang-oc */ 18745a3d56a0STony W Wang-oc if ((c->x86 == 7 && c->x86_model == 0x1b) || 18755a3d56a0STony W Wang-oc (c->x86_model == 0x19 || c->x86_model == 0x1f)) { 18765a3d56a0STony W Wang-oc if (this_cpu_read(mce_num_banks) > 8) 18775a3d56a0STony W Wang-oc mce_banks[8].ctl = 0; 18785a3d56a0STony W Wang-oc } 18795a3d56a0STony W Wang-oc 18805a3d56a0STony W Wang-oc intel_init_cmci(); 188170f0c230STony W Wang-oc intel_init_lmce(); 18825a3d56a0STony W Wang-oc mce_adjust_timer = cmci_intel_adjust_timer; 18835a3d56a0STony W Wang-oc } 18845a3d56a0STony W Wang-oc 188570f0c230STony W Wang-oc static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c) 188670f0c230STony W Wang-oc { 188770f0c230STony W Wang-oc intel_clear_lmce(); 188870f0c230STony W Wang-oc } 188970f0c230STony W Wang-oc 189021afaf18SBorislav Petkov static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) 189121afaf18SBorislav Petkov { 189221afaf18SBorislav Petkov switch (c->x86_vendor) { 189321afaf18SBorislav Petkov case X86_VENDOR_INTEL: 189421afaf18SBorislav Petkov mce_intel_feature_init(c); 189521afaf18SBorislav Petkov mce_adjust_timer = cmci_intel_adjust_timer; 189621afaf18SBorislav Petkov break; 189721afaf18SBorislav Petkov 189821afaf18SBorislav Petkov case X86_VENDOR_AMD: { 189921afaf18SBorislav Petkov mce_amd_feature_init(c); 190021afaf18SBorislav Petkov break; 190121afaf18SBorislav Petkov } 190221afaf18SBorislav Petkov 190321afaf18SBorislav Petkov case X86_VENDOR_HYGON: 190421afaf18SBorislav Petkov mce_hygon_feature_init(c); 190521afaf18SBorislav Petkov break; 190621afaf18SBorislav Petkov 190721afaf18SBorislav Petkov case X86_VENDOR_CENTAUR: 190821afaf18SBorislav Petkov mce_centaur_feature_init(c); 190921afaf18SBorislav Petkov break; 191021afaf18SBorislav Petkov 19115a3d56a0STony W Wang-oc case X86_VENDOR_ZHAOXIN: 19125a3d56a0STony W Wang-oc mce_zhaoxin_feature_init(c); 19135a3d56a0STony W Wang-oc break; 19145a3d56a0STony W Wang-oc 191521afaf18SBorislav Petkov default: 191621afaf18SBorislav Petkov break; 191721afaf18SBorislav Petkov } 191821afaf18SBorislav Petkov } 191921afaf18SBorislav Petkov 192021afaf18SBorislav Petkov static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) 192121afaf18SBorislav Petkov { 192221afaf18SBorislav Petkov switch (c->x86_vendor) { 192321afaf18SBorislav Petkov case X86_VENDOR_INTEL: 192421afaf18SBorislav Petkov mce_intel_feature_clear(c); 192521afaf18SBorislav Petkov break; 192670f0c230STony W Wang-oc 192770f0c230STony W Wang-oc case X86_VENDOR_ZHAOXIN: 192870f0c230STony W Wang-oc mce_zhaoxin_feature_clear(c); 192970f0c230STony W Wang-oc break; 193070f0c230STony W Wang-oc 193121afaf18SBorislav Petkov default: 193221afaf18SBorislav Petkov break; 193321afaf18SBorislav Petkov } 193421afaf18SBorislav Petkov } 193521afaf18SBorislav Petkov 193621afaf18SBorislav Petkov static void mce_start_timer(struct timer_list *t) 193721afaf18SBorislav Petkov { 193821afaf18SBorislav Petkov unsigned long iv = check_interval * HZ; 193921afaf18SBorislav Petkov 194021afaf18SBorislav Petkov if (mca_cfg.ignore_ce || !iv) 194121afaf18SBorislav Petkov return; 194221afaf18SBorislav Petkov 194321afaf18SBorislav Petkov this_cpu_write(mce_next_interval, iv); 194421afaf18SBorislav Petkov __start_timer(t, iv); 194521afaf18SBorislav Petkov } 194621afaf18SBorislav Petkov 194721afaf18SBorislav Petkov static void __mcheck_cpu_setup_timer(void) 194821afaf18SBorislav Petkov { 194921afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 195021afaf18SBorislav Petkov 195121afaf18SBorislav Petkov 
timer_setup(t, mce_timer_fn, TIMER_PINNED); 195221afaf18SBorislav Petkov } 195321afaf18SBorislav Petkov 195421afaf18SBorislav Petkov static void __mcheck_cpu_init_timer(void) 195521afaf18SBorislav Petkov { 195621afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 195721afaf18SBorislav Petkov 195821afaf18SBorislav Petkov timer_setup(t, mce_timer_fn, TIMER_PINNED); 195921afaf18SBorislav Petkov mce_start_timer(t); 196021afaf18SBorislav Petkov } 196121afaf18SBorislav Petkov 196245d4b7b9SYazen Ghannam bool filter_mce(struct mce *m) 196345d4b7b9SYazen Ghannam { 196471a84402SYazen Ghannam if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 196571a84402SYazen Ghannam return amd_filter_mce(m); 19662976908eSPrarit Bhargava if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 19672976908eSPrarit Bhargava return intel_filter_mce(m); 196871a84402SYazen Ghannam 196945d4b7b9SYazen Ghannam return false; 197045d4b7b9SYazen Ghannam } 197145d4b7b9SYazen Ghannam 197221afaf18SBorislav Petkov /* Handle unconfigured int18 (should never happen) */ 1973865d3a9aSThomas Gleixner static noinstr void unexpected_machine_check(struct pt_regs *regs) 197421afaf18SBorislav Petkov { 1975865d3a9aSThomas Gleixner instrumentation_begin(); 197621afaf18SBorislav Petkov pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", 197721afaf18SBorislav Petkov smp_processor_id()); 1978865d3a9aSThomas Gleixner instrumentation_end(); 197921afaf18SBorislav Petkov } 198021afaf18SBorislav Petkov 198121afaf18SBorislav Petkov /* Call the installed machine check handler for this CPU setup. */ 19828cd501c1SThomas Gleixner void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check; 198321afaf18SBorislav Petkov 19844c0dcd83SThomas Gleixner static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) 198521afaf18SBorislav Petkov { 1986bc21a291SThomas Gleixner bool irq_state; 1987bc21a291SThomas Gleixner 198813cbc0cdSAndy Lutomirski WARN_ON_ONCE(user_mode(regs)); 198913cbc0cdSAndy Lutomirski 19904c0dcd83SThomas Gleixner /* 19914c0dcd83SThomas Gleixner * Only required when from kernel mode. See 19924c0dcd83SThomas Gleixner * mce_check_crashing_cpu() for details. 19934c0dcd83SThomas Gleixner */ 199494a46d31SThomas Gleixner if (machine_check_vector == do_machine_check && 199594a46d31SThomas Gleixner mce_check_crashing_cpu()) 199694a46d31SThomas Gleixner return; 199794a46d31SThomas Gleixner 1998bc21a291SThomas Gleixner irq_state = idtentry_enter_nmi(regs); 1999865d3a9aSThomas Gleixner /* 2000865d3a9aSThomas Gleixner * The call targets are marked noinstr, but objtool can't figure 2001865d3a9aSThomas Gleixner * that out because it's an indirect call. Annotate it. 
2002865d3a9aSThomas Gleixner */ 2003865d3a9aSThomas Gleixner instrumentation_begin(); 2004bf2b3008SPeter Zijlstra trace_hardirqs_off_finish(); 20058cd501c1SThomas Gleixner machine_check_vector(regs); 20063ffdfdceSThomas Gleixner if (regs->flags & X86_EFLAGS_IF) 20073ffdfdceSThomas Gleixner trace_hardirqs_on_prepare(); 2008865d3a9aSThomas Gleixner instrumentation_end(); 2009bc21a291SThomas Gleixner idtentry_exit_nmi(regs, irq_state); 201021afaf18SBorislav Petkov } 201121afaf18SBorislav Petkov 20124c0dcd83SThomas Gleixner static __always_inline void exc_machine_check_user(struct pt_regs *regs) 20134c0dcd83SThomas Gleixner { 2014517e4992SThomas Gleixner irqentry_enter_from_user_mode(regs); 2015865d3a9aSThomas Gleixner instrumentation_begin(); 20164c0dcd83SThomas Gleixner machine_check_vector(regs); 2017865d3a9aSThomas Gleixner instrumentation_end(); 2018517e4992SThomas Gleixner irqentry_exit_to_user_mode(regs); 20194c0dcd83SThomas Gleixner } 20204c0dcd83SThomas Gleixner 20214c0dcd83SThomas Gleixner #ifdef CONFIG_X86_64 20224c0dcd83SThomas Gleixner /* MCE hit kernel mode */ 20234c0dcd83SThomas Gleixner DEFINE_IDTENTRY_MCE(exc_machine_check) 20244c0dcd83SThomas Gleixner { 2025cd840e42SPeter Zijlstra unsigned long dr7; 2026cd840e42SPeter Zijlstra 2027cd840e42SPeter Zijlstra dr7 = local_db_save(); 20284c0dcd83SThomas Gleixner exc_machine_check_kernel(regs); 2029cd840e42SPeter Zijlstra local_db_restore(dr7); 20304c0dcd83SThomas Gleixner } 20314c0dcd83SThomas Gleixner 20324c0dcd83SThomas Gleixner /* The user mode variant. */ 20334c0dcd83SThomas Gleixner DEFINE_IDTENTRY_MCE_USER(exc_machine_check) 20344c0dcd83SThomas Gleixner { 2035cd840e42SPeter Zijlstra unsigned long dr7; 2036cd840e42SPeter Zijlstra 2037cd840e42SPeter Zijlstra dr7 = local_db_save(); 20384c0dcd83SThomas Gleixner exc_machine_check_user(regs); 2039cd840e42SPeter Zijlstra local_db_restore(dr7); 20404c0dcd83SThomas Gleixner } 20414c0dcd83SThomas Gleixner #else 20424c0dcd83SThomas Gleixner /* 32bit unified entry point */ 204313cbc0cdSAndy Lutomirski DEFINE_IDTENTRY_RAW(exc_machine_check) 20444c0dcd83SThomas Gleixner { 2045cd840e42SPeter Zijlstra unsigned long dr7; 2046cd840e42SPeter Zijlstra 2047cd840e42SPeter Zijlstra dr7 = local_db_save(); 20484c0dcd83SThomas Gleixner if (user_mode(regs)) 20494c0dcd83SThomas Gleixner exc_machine_check_user(regs); 20504c0dcd83SThomas Gleixner else 20514c0dcd83SThomas Gleixner exc_machine_check_kernel(regs); 2052cd840e42SPeter Zijlstra local_db_restore(dr7); 20534c0dcd83SThomas Gleixner } 20544c0dcd83SThomas Gleixner #endif 205521afaf18SBorislav Petkov 205621afaf18SBorislav Petkov /* 205721afaf18SBorislav Petkov * Called for each booted CPU to set up machine checks. 
205821afaf18SBorislav Petkov * Must be called with preempt off: 205921afaf18SBorislav Petkov */ 206021afaf18SBorislav Petkov void mcheck_cpu_init(struct cpuinfo_x86 *c) 206121afaf18SBorislav Petkov { 206221afaf18SBorislav Petkov if (mca_cfg.disabled) 206321afaf18SBorislav Petkov return; 206421afaf18SBorislav Petkov 206521afaf18SBorislav Petkov if (__mcheck_cpu_ancient_init(c)) 206621afaf18SBorislav Petkov return; 206721afaf18SBorislav Petkov 206821afaf18SBorislav Petkov if (!mce_available(c)) 206921afaf18SBorislav Petkov return; 207021afaf18SBorislav Petkov 2071b4914508SYazen Ghannam __mcheck_cpu_cap_init(); 2072b4914508SYazen Ghannam 2073b4914508SYazen Ghannam if (__mcheck_cpu_apply_quirks(c) < 0) { 207421afaf18SBorislav Petkov mca_cfg.disabled = 1; 207521afaf18SBorislav Petkov return; 207621afaf18SBorislav Petkov } 207721afaf18SBorislav Petkov 207821afaf18SBorislav Petkov if (mce_gen_pool_init()) { 207921afaf18SBorislav Petkov mca_cfg.disabled = 1; 208021afaf18SBorislav Petkov pr_emerg("Couldn't allocate MCE records pool!\n"); 208121afaf18SBorislav Petkov return; 208221afaf18SBorislav Petkov } 208321afaf18SBorislav Petkov 208421afaf18SBorislav Petkov machine_check_vector = do_machine_check; 208521afaf18SBorislav Petkov 208621afaf18SBorislav Petkov __mcheck_cpu_init_early(c); 208721afaf18SBorislav Petkov __mcheck_cpu_init_generic(); 208821afaf18SBorislav Petkov __mcheck_cpu_init_vendor(c); 208921afaf18SBorislav Petkov __mcheck_cpu_init_clear_banks(); 2090068b053dSYazen Ghannam __mcheck_cpu_check_banks(); 209121afaf18SBorislav Petkov __mcheck_cpu_setup_timer(); 209221afaf18SBorislav Petkov } 209321afaf18SBorislav Petkov 209421afaf18SBorislav Petkov /* 209521afaf18SBorislav Petkov * Called for each booted CPU to clear some machine checks opt-ins 209621afaf18SBorislav Petkov */ 209721afaf18SBorislav Petkov void mcheck_cpu_clear(struct cpuinfo_x86 *c) 209821afaf18SBorislav Petkov { 209921afaf18SBorislav Petkov if (mca_cfg.disabled) 210021afaf18SBorislav Petkov return; 210121afaf18SBorislav Petkov 210221afaf18SBorislav Petkov if (!mce_available(c)) 210321afaf18SBorislav Petkov return; 210421afaf18SBorislav Petkov 210521afaf18SBorislav Petkov /* 210621afaf18SBorislav Petkov * Possibly to clear general settings generic to x86 210721afaf18SBorislav Petkov * __mcheck_cpu_clear_generic(c); 210821afaf18SBorislav Petkov */ 210921afaf18SBorislav Petkov __mcheck_cpu_clear_vendor(c); 211021afaf18SBorislav Petkov 211121afaf18SBorislav Petkov } 211221afaf18SBorislav Petkov 211321afaf18SBorislav Petkov static void __mce_disable_bank(void *arg) 211421afaf18SBorislav Petkov { 211521afaf18SBorislav Petkov int bank = *((int *)arg); 211621afaf18SBorislav Petkov __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); 211721afaf18SBorislav Petkov cmci_disable_bank(bank); 211821afaf18SBorislav Petkov } 211921afaf18SBorislav Petkov 212021afaf18SBorislav Petkov void mce_disable_bank(int bank) 212121afaf18SBorislav Petkov { 2122c7d314f3SYazen Ghannam if (bank >= this_cpu_read(mce_num_banks)) { 212321afaf18SBorislav Petkov pr_warn(FW_BUG 212421afaf18SBorislav Petkov "Ignoring request to disable invalid MCA bank %d.\n", 212521afaf18SBorislav Petkov bank); 212621afaf18SBorislav Petkov return; 212721afaf18SBorislav Petkov } 212821afaf18SBorislav Petkov set_bit(bank, mce_banks_ce_disabled); 212921afaf18SBorislav Petkov on_each_cpu(__mce_disable_bank, &bank, 1); 213021afaf18SBorislav Petkov } 213121afaf18SBorislav Petkov 213221afaf18SBorislav Petkov /* 213321afaf18SBorislav Petkov * mce=off Disables machine check 
213421afaf18SBorislav Petkov * mce=no_cmci Disables CMCI 213521afaf18SBorislav Petkov * mce=no_lmce Disables LMCE 213621afaf18SBorislav Petkov * mce=dont_log_ce Clears corrected events silently, no log created for CEs. 213743505646STony Luck * mce=print_all Print all machine check logs to console 213821afaf18SBorislav Petkov * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared. 213921afaf18SBorislav Petkov * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above) 214021afaf18SBorislav Petkov * monarchtimeout is how long to wait for other CPUs on machine 214121afaf18SBorislav Petkov * check, or 0 to not wait 214221afaf18SBorislav Petkov * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h 214321afaf18SBorislav Petkov and older. 214421afaf18SBorislav Petkov * mce=nobootlog Don't log MCEs from before booting. 214521afaf18SBorislav Petkov * mce=bios_cmci_threshold Don't program the CMCI threshold 2146ec6347bbSDan Williams * mce=recovery force enable copy_mc_fragile() 214721afaf18SBorislav Petkov */ 214821afaf18SBorislav Petkov static int __init mcheck_enable(char *str) 214921afaf18SBorislav Petkov { 215021afaf18SBorislav Petkov struct mca_config *cfg = &mca_cfg; 215121afaf18SBorislav Petkov 215221afaf18SBorislav Petkov if (*str == 0) { 215321afaf18SBorislav Petkov enable_p5_mce(); 215421afaf18SBorislav Petkov return 1; 215521afaf18SBorislav Petkov } 215621afaf18SBorislav Petkov if (*str == '=') 215721afaf18SBorislav Petkov str++; 215821afaf18SBorislav Petkov if (!strcmp(str, "off")) 215921afaf18SBorislav Petkov cfg->disabled = 1; 216021afaf18SBorislav Petkov else if (!strcmp(str, "no_cmci")) 216121afaf18SBorislav Petkov cfg->cmci_disabled = true; 216221afaf18SBorislav Petkov else if (!strcmp(str, "no_lmce")) 216321afaf18SBorislav Petkov cfg->lmce_disabled = 1; 216421afaf18SBorislav Petkov else if (!strcmp(str, "dont_log_ce")) 216521afaf18SBorislav Petkov cfg->dont_log_ce = true; 216643505646STony Luck else if (!strcmp(str, "print_all")) 216743505646STony Luck cfg->print_all = true; 216821afaf18SBorislav Petkov else if (!strcmp(str, "ignore_ce")) 216921afaf18SBorislav Petkov cfg->ignore_ce = true; 217021afaf18SBorislav Petkov else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) 217121afaf18SBorislav Petkov cfg->bootlog = (str[0] == 'b'); 217221afaf18SBorislav Petkov else if (!strcmp(str, "bios_cmci_threshold")) 217321afaf18SBorislav Petkov cfg->bios_cmci_threshold = 1; 217421afaf18SBorislav Petkov else if (!strcmp(str, "recovery")) 217521afaf18SBorislav Petkov cfg->recovery = 1; 217621afaf18SBorislav Petkov else if (isdigit(str[0])) { 217721afaf18SBorislav Petkov if (get_option(&str, &cfg->tolerant) == 2) 217821afaf18SBorislav Petkov get_option(&str, &(cfg->monarch_timeout)); 217921afaf18SBorislav Petkov } else { 218021afaf18SBorislav Petkov pr_info("mce argument %s ignored. Please use /sys\n", str); 218121afaf18SBorislav Petkov return 0; 218221afaf18SBorislav Petkov } 218321afaf18SBorislav Petkov return 1; 218421afaf18SBorislav Petkov } 218521afaf18SBorislav Petkov __setup("mce", mcheck_enable); 218621afaf18SBorislav Petkov 218721afaf18SBorislav Petkov int __init mcheck_init(void) 218821afaf18SBorislav Petkov { 218921afaf18SBorislav Petkov mcheck_intel_therm_init(); 2190c9c6d216STony Luck mce_register_decode_chain(&early_nb); 21918438b84aSJan H. 
Schönherr mce_register_decode_chain(&mce_uc_nb); 219221afaf18SBorislav Petkov mce_register_decode_chain(&mce_default_nb); 219321afaf18SBorislav Petkov mcheck_vendor_init_severity(); 219421afaf18SBorislav Petkov 219521afaf18SBorislav Petkov INIT_WORK(&mce_work, mce_gen_pool_process); 219621afaf18SBorislav Petkov init_irq_work(&mce_irq_work, mce_irq_work_cb); 219721afaf18SBorislav Petkov 219821afaf18SBorislav Petkov return 0; 219921afaf18SBorislav Petkov } 220021afaf18SBorislav Petkov 220121afaf18SBorislav Petkov /* 220221afaf18SBorislav Petkov * mce_syscore: PM support 220321afaf18SBorislav Petkov */ 220421afaf18SBorislav Petkov 220521afaf18SBorislav Petkov /* 220621afaf18SBorislav Petkov * Disable machine checks on suspend and shutdown. We can't really handle 220721afaf18SBorislav Petkov * them later. 220821afaf18SBorislav Petkov */ 220921afaf18SBorislav Petkov static void mce_disable_error_reporting(void) 221021afaf18SBorislav Petkov { 2211b4914508SYazen Ghannam struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 221221afaf18SBorislav Petkov int i; 221321afaf18SBorislav Petkov 2214c7d314f3SYazen Ghannam for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 221521afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 221621afaf18SBorislav Petkov 221721afaf18SBorislav Petkov if (b->init) 221821afaf18SBorislav Petkov wrmsrl(msr_ops.ctl(i), 0); 221921afaf18SBorislav Petkov } 222021afaf18SBorislav Petkov return; 222121afaf18SBorislav Petkov } 222221afaf18SBorislav Petkov 222321afaf18SBorislav Petkov static void vendor_disable_error_reporting(void) 222421afaf18SBorislav Petkov { 222521afaf18SBorislav Petkov /* 22266e898d2bSTony W Wang-oc * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these 22276e898d2bSTony W Wang-oc * MSRs are socket-wide. Disabling them for just a single offlined CPU 22286e898d2bSTony W Wang-oc * is bad, since it will inhibit reporting for all shared resources on 22296e898d2bSTony W Wang-oc * the socket like the last level cache (LLC), the integrated memory 22306e898d2bSTony W Wang-oc * controller (iMC), etc. 223121afaf18SBorislav Petkov */ 223221afaf18SBorislav Petkov if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || 223321afaf18SBorislav Petkov boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || 22346e898d2bSTony W Wang-oc boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 22356e898d2bSTony W Wang-oc boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) 223621afaf18SBorislav Petkov return; 223721afaf18SBorislav Petkov 223821afaf18SBorislav Petkov mce_disable_error_reporting(); 223921afaf18SBorislav Petkov } 224021afaf18SBorislav Petkov 224121afaf18SBorislav Petkov static int mce_syscore_suspend(void) 224221afaf18SBorislav Petkov { 224321afaf18SBorislav Petkov vendor_disable_error_reporting(); 224421afaf18SBorislav Petkov return 0; 224521afaf18SBorislav Petkov } 224621afaf18SBorislav Petkov 224721afaf18SBorislav Petkov static void mce_syscore_shutdown(void) 224821afaf18SBorislav Petkov { 224921afaf18SBorislav Petkov vendor_disable_error_reporting(); 225021afaf18SBorislav Petkov } 225121afaf18SBorislav Petkov 225221afaf18SBorislav Petkov /* 225321afaf18SBorislav Petkov * On resume clear all MCE state. Don't want to see leftovers from the BIOS. 
225421afaf18SBorislav Petkov * Only one CPU is active at this time, the others get re-added later using 225521afaf18SBorislav Petkov * CPU hotplug: 225621afaf18SBorislav Petkov */ 225721afaf18SBorislav Petkov static void mce_syscore_resume(void) 225821afaf18SBorislav Petkov { 225921afaf18SBorislav Petkov __mcheck_cpu_init_generic(); 226021afaf18SBorislav Petkov __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info)); 226121afaf18SBorislav Petkov __mcheck_cpu_init_clear_banks(); 226221afaf18SBorislav Petkov } 226321afaf18SBorislav Petkov 226421afaf18SBorislav Petkov static struct syscore_ops mce_syscore_ops = { 226521afaf18SBorislav Petkov .suspend = mce_syscore_suspend, 226621afaf18SBorislav Petkov .shutdown = mce_syscore_shutdown, 226721afaf18SBorislav Petkov .resume = mce_syscore_resume, 226821afaf18SBorislav Petkov }; 226921afaf18SBorislav Petkov 227021afaf18SBorislav Petkov /* 227121afaf18SBorislav Petkov * mce_device: Sysfs support 227221afaf18SBorislav Petkov */ 227321afaf18SBorislav Petkov 227421afaf18SBorislav Petkov static void mce_cpu_restart(void *data) 227521afaf18SBorislav Petkov { 227621afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 227721afaf18SBorislav Petkov return; 227821afaf18SBorislav Petkov __mcheck_cpu_init_generic(); 227921afaf18SBorislav Petkov __mcheck_cpu_init_clear_banks(); 228021afaf18SBorislav Petkov __mcheck_cpu_init_timer(); 228121afaf18SBorislav Petkov } 228221afaf18SBorislav Petkov 228321afaf18SBorislav Petkov /* Reinit MCEs after user configuration changes */ 228421afaf18SBorislav Petkov static void mce_restart(void) 228521afaf18SBorislav Petkov { 228621afaf18SBorislav Petkov mce_timer_delete_all(); 228721afaf18SBorislav Petkov on_each_cpu(mce_cpu_restart, NULL, 1); 228821afaf18SBorislav Petkov } 228921afaf18SBorislav Petkov 229021afaf18SBorislav Petkov /* Toggle features for corrected errors */ 229121afaf18SBorislav Petkov static void mce_disable_cmci(void *data) 229221afaf18SBorislav Petkov { 229321afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 229421afaf18SBorislav Petkov return; 229521afaf18SBorislav Petkov cmci_clear(); 229621afaf18SBorislav Petkov } 229721afaf18SBorislav Petkov 229821afaf18SBorislav Petkov static void mce_enable_ce(void *all) 229921afaf18SBorislav Petkov { 230021afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 230121afaf18SBorislav Petkov return; 230221afaf18SBorislav Petkov cmci_reenable(); 230321afaf18SBorislav Petkov cmci_recheck(); 230421afaf18SBorislav Petkov if (all) 230521afaf18SBorislav Petkov __mcheck_cpu_init_timer(); 230621afaf18SBorislav Petkov } 230721afaf18SBorislav Petkov 230821afaf18SBorislav Petkov static struct bus_type mce_subsys = { 230921afaf18SBorislav Petkov .name = "machinecheck", 231021afaf18SBorislav Petkov .dev_name = "machinecheck", 231121afaf18SBorislav Petkov }; 231221afaf18SBorislav Petkov 231321afaf18SBorislav Petkov DEFINE_PER_CPU(struct device *, mce_device); 231421afaf18SBorislav Petkov 2315b4914508SYazen Ghannam static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr) 231621afaf18SBorislav Petkov { 2317b4914508SYazen Ghannam return container_of(attr, struct mce_bank_dev, attr); 231821afaf18SBorislav Petkov } 231921afaf18SBorislav Petkov 232021afaf18SBorislav Petkov static ssize_t show_bank(struct device *s, struct device_attribute *attr, 232121afaf18SBorislav Petkov char *buf) 232221afaf18SBorislav Petkov { 2323b4914508SYazen Ghannam u8 bank = attr_to_bank(attr)->bank; 2324b4914508SYazen Ghannam struct mce_bank *b; 
2325b4914508SYazen Ghannam 2326c7d314f3SYazen Ghannam if (bank >= per_cpu(mce_num_banks, s->id)) 2327b4914508SYazen Ghannam return -EINVAL; 2328b4914508SYazen Ghannam 2329b4914508SYazen Ghannam b = &per_cpu(mce_banks_array, s->id)[bank]; 2330b4914508SYazen Ghannam 2331068b053dSYazen Ghannam if (!b->init) 2332068b053dSYazen Ghannam return -ENODEV; 2333068b053dSYazen Ghannam 2334b4914508SYazen Ghannam return sprintf(buf, "%llx\n", b->ctl); 233521afaf18SBorislav Petkov } 233621afaf18SBorislav Petkov 233721afaf18SBorislav Petkov static ssize_t set_bank(struct device *s, struct device_attribute *attr, 233821afaf18SBorislav Petkov const char *buf, size_t size) 233921afaf18SBorislav Petkov { 2340b4914508SYazen Ghannam u8 bank = attr_to_bank(attr)->bank; 2341b4914508SYazen Ghannam struct mce_bank *b; 234221afaf18SBorislav Petkov u64 new; 234321afaf18SBorislav Petkov 234421afaf18SBorislav Petkov if (kstrtou64(buf, 0, &new) < 0) 234521afaf18SBorislav Petkov return -EINVAL; 234621afaf18SBorislav Petkov 2347c7d314f3SYazen Ghannam if (bank >= per_cpu(mce_num_banks, s->id)) 2348b4914508SYazen Ghannam return -EINVAL; 2349b4914508SYazen Ghannam 2350b4914508SYazen Ghannam b = &per_cpu(mce_banks_array, s->id)[bank]; 2351b4914508SYazen Ghannam 2352068b053dSYazen Ghannam if (!b->init) 2353068b053dSYazen Ghannam return -ENODEV; 2354068b053dSYazen Ghannam 2355b4914508SYazen Ghannam b->ctl = new; 235621afaf18SBorislav Petkov mce_restart(); 235721afaf18SBorislav Petkov 235821afaf18SBorislav Petkov return size; 235921afaf18SBorislav Petkov } 236021afaf18SBorislav Petkov 236121afaf18SBorislav Petkov static ssize_t set_ignore_ce(struct device *s, 236221afaf18SBorislav Petkov struct device_attribute *attr, 236321afaf18SBorislav Petkov const char *buf, size_t size) 236421afaf18SBorislav Petkov { 236521afaf18SBorislav Petkov u64 new; 236621afaf18SBorislav Petkov 236721afaf18SBorislav Petkov if (kstrtou64(buf, 0, &new) < 0) 236821afaf18SBorislav Petkov return -EINVAL; 236921afaf18SBorislav Petkov 237021afaf18SBorislav Petkov mutex_lock(&mce_sysfs_mutex); 237121afaf18SBorislav Petkov if (mca_cfg.ignore_ce ^ !!new) { 237221afaf18SBorislav Petkov if (new) { 237321afaf18SBorislav Petkov /* disable ce features */ 237421afaf18SBorislav Petkov mce_timer_delete_all(); 237521afaf18SBorislav Petkov on_each_cpu(mce_disable_cmci, NULL, 1); 237621afaf18SBorislav Petkov mca_cfg.ignore_ce = true; 237721afaf18SBorislav Petkov } else { 237821afaf18SBorislav Petkov /* enable ce features */ 237921afaf18SBorislav Petkov mca_cfg.ignore_ce = false; 238021afaf18SBorislav Petkov on_each_cpu(mce_enable_ce, (void *)1, 1); 238121afaf18SBorislav Petkov } 238221afaf18SBorislav Petkov } 238321afaf18SBorislav Petkov mutex_unlock(&mce_sysfs_mutex); 238421afaf18SBorislav Petkov 238521afaf18SBorislav Petkov return size; 238621afaf18SBorislav Petkov } 238721afaf18SBorislav Petkov 238821afaf18SBorislav Petkov static ssize_t set_cmci_disabled(struct device *s, 238921afaf18SBorislav Petkov struct device_attribute *attr, 239021afaf18SBorislav Petkov const char *buf, size_t size) 239121afaf18SBorislav Petkov { 239221afaf18SBorislav Petkov u64 new; 239321afaf18SBorislav Petkov 239421afaf18SBorislav Petkov if (kstrtou64(buf, 0, &new) < 0) 239521afaf18SBorislav Petkov return -EINVAL; 239621afaf18SBorislav Petkov 239721afaf18SBorislav Petkov mutex_lock(&mce_sysfs_mutex); 239821afaf18SBorislav Petkov if (mca_cfg.cmci_disabled ^ !!new) { 239921afaf18SBorislav Petkov if (new) { 240021afaf18SBorislav Petkov /* disable cmci */ 240121afaf18SBorislav Petkov 
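	/*
	 * Tearing CMCI down here leaves corrected errors to the periodic
	 * poller only. From userspace this corresponds to, e.g. (assuming
	 * the usual sysfs mount point):
	 *
	 *   echo 1 > /sys/devices/system/machinecheck/machinecheck0/cmci_disabled
	 */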
on_each_cpu(mce_disable_cmci, NULL, 1); 240221afaf18SBorislav Petkov mca_cfg.cmci_disabled = true; 240321afaf18SBorislav Petkov } else { 240421afaf18SBorislav Petkov /* enable cmci */ 240521afaf18SBorislav Petkov mca_cfg.cmci_disabled = false; 240621afaf18SBorislav Petkov on_each_cpu(mce_enable_ce, NULL, 1); 240721afaf18SBorislav Petkov } 240821afaf18SBorislav Petkov } 240921afaf18SBorislav Petkov mutex_unlock(&mce_sysfs_mutex); 241021afaf18SBorislav Petkov 241121afaf18SBorislav Petkov return size; 241221afaf18SBorislav Petkov } 241321afaf18SBorislav Petkov 241421afaf18SBorislav Petkov static ssize_t store_int_with_restart(struct device *s, 241521afaf18SBorislav Petkov struct device_attribute *attr, 241621afaf18SBorislav Petkov const char *buf, size_t size) 241721afaf18SBorislav Petkov { 241821afaf18SBorislav Petkov unsigned long old_check_interval = check_interval; 241921afaf18SBorislav Petkov ssize_t ret = device_store_ulong(s, attr, buf, size); 242021afaf18SBorislav Petkov 242121afaf18SBorislav Petkov if (check_interval == old_check_interval) 242221afaf18SBorislav Petkov return ret; 242321afaf18SBorislav Petkov 242421afaf18SBorislav Petkov mutex_lock(&mce_sysfs_mutex); 242521afaf18SBorislav Petkov mce_restart(); 242621afaf18SBorislav Petkov mutex_unlock(&mce_sysfs_mutex); 242721afaf18SBorislav Petkov 242821afaf18SBorislav Petkov return ret; 242921afaf18SBorislav Petkov } 243021afaf18SBorislav Petkov 243121afaf18SBorislav Petkov static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant); 243221afaf18SBorislav Petkov static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout); 243321afaf18SBorislav Petkov static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce); 243443505646STony Luck static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all); 243521afaf18SBorislav Petkov 243621afaf18SBorislav Petkov static struct dev_ext_attribute dev_attr_check_interval = { 243721afaf18SBorislav Petkov __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), 243821afaf18SBorislav Petkov &check_interval 243921afaf18SBorislav Petkov }; 244021afaf18SBorislav Petkov 244121afaf18SBorislav Petkov static struct dev_ext_attribute dev_attr_ignore_ce = { 244221afaf18SBorislav Petkov __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce), 244321afaf18SBorislav Petkov &mca_cfg.ignore_ce 244421afaf18SBorislav Petkov }; 244521afaf18SBorislav Petkov 244621afaf18SBorislav Petkov static struct dev_ext_attribute dev_attr_cmci_disabled = { 244721afaf18SBorislav Petkov __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled), 244821afaf18SBorislav Petkov &mca_cfg.cmci_disabled 244921afaf18SBorislav Petkov }; 245021afaf18SBorislav Petkov 245121afaf18SBorislav Petkov static struct device_attribute *mce_device_attrs[] = { 245221afaf18SBorislav Petkov &dev_attr_tolerant.attr, 245321afaf18SBorislav Petkov &dev_attr_check_interval.attr, 245421afaf18SBorislav Petkov #ifdef CONFIG_X86_MCELOG_LEGACY 245521afaf18SBorislav Petkov &dev_attr_trigger, 245621afaf18SBorislav Petkov #endif 245721afaf18SBorislav Petkov &dev_attr_monarch_timeout.attr, 245821afaf18SBorislav Petkov &dev_attr_dont_log_ce.attr, 245943505646STony Luck &dev_attr_print_all.attr, 246021afaf18SBorislav Petkov &dev_attr_ignore_ce.attr, 246121afaf18SBorislav Petkov &dev_attr_cmci_disabled.attr, 246221afaf18SBorislav Petkov NULL 246321afaf18SBorislav Petkov }; 246421afaf18SBorislav Petkov 246521afaf18SBorislav Petkov static cpumask_var_t mce_device_initialized; 246621afaf18SBorislav Petkov 246721afaf18SBorislav Petkov static void 
mce_device_release(struct device *dev) 246821afaf18SBorislav Petkov { 246921afaf18SBorislav Petkov kfree(dev); 247021afaf18SBorislav Petkov } 247121afaf18SBorislav Petkov 2472b4914508SYazen Ghannam /* Per CPU device init. All of the CPUs still share the same bank device: */ 247321afaf18SBorislav Petkov static int mce_device_create(unsigned int cpu) 247421afaf18SBorislav Petkov { 247521afaf18SBorislav Petkov struct device *dev; 247621afaf18SBorislav Petkov int err; 247721afaf18SBorislav Petkov int i, j; 247821afaf18SBorislav Petkov 247921afaf18SBorislav Petkov if (!mce_available(&boot_cpu_data)) 248021afaf18SBorislav Petkov return -EIO; 248121afaf18SBorislav Petkov 248221afaf18SBorislav Petkov dev = per_cpu(mce_device, cpu); 248321afaf18SBorislav Petkov if (dev) 248421afaf18SBorislav Petkov return 0; 248521afaf18SBorislav Petkov 248621afaf18SBorislav Petkov dev = kzalloc(sizeof(*dev), GFP_KERNEL); 248721afaf18SBorislav Petkov if (!dev) 248821afaf18SBorislav Petkov return -ENOMEM; 248921afaf18SBorislav Petkov dev->id = cpu; 249021afaf18SBorislav Petkov dev->bus = &mce_subsys; 249121afaf18SBorislav Petkov dev->release = &mce_device_release; 249221afaf18SBorislav Petkov 249321afaf18SBorislav Petkov err = device_register(dev); 249421afaf18SBorislav Petkov if (err) { 249521afaf18SBorislav Petkov put_device(dev); 249621afaf18SBorislav Petkov return err; 249721afaf18SBorislav Petkov } 249821afaf18SBorislav Petkov 249921afaf18SBorislav Petkov for (i = 0; mce_device_attrs[i]; i++) { 250021afaf18SBorislav Petkov err = device_create_file(dev, mce_device_attrs[i]); 250121afaf18SBorislav Petkov if (err) 250221afaf18SBorislav Petkov goto error; 250321afaf18SBorislav Petkov } 2504c7d314f3SYazen Ghannam for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) { 2505b4914508SYazen Ghannam err = device_create_file(dev, &mce_bank_devs[j].attr); 250621afaf18SBorislav Petkov if (err) 250721afaf18SBorislav Petkov goto error2; 250821afaf18SBorislav Petkov } 250921afaf18SBorislav Petkov cpumask_set_cpu(cpu, mce_device_initialized); 251021afaf18SBorislav Petkov per_cpu(mce_device, cpu) = dev; 251121afaf18SBorislav Petkov 251221afaf18SBorislav Petkov return 0; 251321afaf18SBorislav Petkov error2: 251421afaf18SBorislav Petkov while (--j >= 0) 2515b4914508SYazen Ghannam device_remove_file(dev, &mce_bank_devs[j].attr); 251621afaf18SBorislav Petkov error: 251721afaf18SBorislav Petkov while (--i >= 0) 251821afaf18SBorislav Petkov device_remove_file(dev, mce_device_attrs[i]); 251921afaf18SBorislav Petkov 252021afaf18SBorislav Petkov device_unregister(dev); 252121afaf18SBorislav Petkov 252221afaf18SBorislav Petkov return err; 252321afaf18SBorislav Petkov } 252421afaf18SBorislav Petkov 252521afaf18SBorislav Petkov static void mce_device_remove(unsigned int cpu) 252621afaf18SBorislav Petkov { 252721afaf18SBorislav Petkov struct device *dev = per_cpu(mce_device, cpu); 252821afaf18SBorislav Petkov int i; 252921afaf18SBorislav Petkov 253021afaf18SBorislav Petkov if (!cpumask_test_cpu(cpu, mce_device_initialized)) 253121afaf18SBorislav Petkov return; 253221afaf18SBorislav Petkov 253321afaf18SBorislav Petkov for (i = 0; mce_device_attrs[i]; i++) 253421afaf18SBorislav Petkov device_remove_file(dev, mce_device_attrs[i]); 253521afaf18SBorislav Petkov 2536c7d314f3SYazen Ghannam for (i = 0; i < per_cpu(mce_num_banks, cpu); i++) 2537b4914508SYazen Ghannam device_remove_file(dev, &mce_bank_devs[i].attr); 253821afaf18SBorislav Petkov 253921afaf18SBorislav Petkov device_unregister(dev); 254021afaf18SBorislav Petkov cpumask_clear_cpu(cpu, 
mce_device_initialized); 254121afaf18SBorislav Petkov per_cpu(mce_device, cpu) = NULL; 254221afaf18SBorislav Petkov } 254321afaf18SBorislav Petkov 254421afaf18SBorislav Petkov /* Make sure there are no machine checks on offlined CPUs. */ 254521afaf18SBorislav Petkov static void mce_disable_cpu(void) 254621afaf18SBorislav Petkov { 254721afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 254821afaf18SBorislav Petkov return; 254921afaf18SBorislav Petkov 255021afaf18SBorislav Petkov if (!cpuhp_tasks_frozen) 255121afaf18SBorislav Petkov cmci_clear(); 255221afaf18SBorislav Petkov 255321afaf18SBorislav Petkov vendor_disable_error_reporting(); 255421afaf18SBorislav Petkov } 255521afaf18SBorislav Petkov 255621afaf18SBorislav Petkov static void mce_reenable_cpu(void) 255721afaf18SBorislav Petkov { 2558b4914508SYazen Ghannam struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 255921afaf18SBorislav Petkov int i; 256021afaf18SBorislav Petkov 256121afaf18SBorislav Petkov if (!mce_available(raw_cpu_ptr(&cpu_info))) 256221afaf18SBorislav Petkov return; 256321afaf18SBorislav Petkov 256421afaf18SBorislav Petkov if (!cpuhp_tasks_frozen) 256521afaf18SBorislav Petkov cmci_reenable(); 2566c7d314f3SYazen Ghannam for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 256721afaf18SBorislav Petkov struct mce_bank *b = &mce_banks[i]; 256821afaf18SBorislav Petkov 256921afaf18SBorislav Petkov if (b->init) 257021afaf18SBorislav Petkov wrmsrl(msr_ops.ctl(i), b->ctl); 257121afaf18SBorislav Petkov } 257221afaf18SBorislav Petkov } 257321afaf18SBorislav Petkov 257421afaf18SBorislav Petkov static int mce_cpu_dead(unsigned int cpu) 257521afaf18SBorislav Petkov { 257621afaf18SBorislav Petkov mce_intel_hcpu_update(cpu); 257721afaf18SBorislav Petkov 257821afaf18SBorislav Petkov /* intentionally ignoring frozen here */ 257921afaf18SBorislav Petkov if (!cpuhp_tasks_frozen) 258021afaf18SBorislav Petkov cmci_rediscover(); 258121afaf18SBorislav Petkov return 0; 258221afaf18SBorislav Petkov } 258321afaf18SBorislav Petkov 258421afaf18SBorislav Petkov static int mce_cpu_online(unsigned int cpu) 258521afaf18SBorislav Petkov { 258621afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 258721afaf18SBorislav Petkov int ret; 258821afaf18SBorislav Petkov 258921afaf18SBorislav Petkov mce_device_create(cpu); 259021afaf18SBorislav Petkov 259121afaf18SBorislav Petkov ret = mce_threshold_create_device(cpu); 259221afaf18SBorislav Petkov if (ret) { 259321afaf18SBorislav Petkov mce_device_remove(cpu); 259421afaf18SBorislav Petkov return ret; 259521afaf18SBorislav Petkov } 259621afaf18SBorislav Petkov mce_reenable_cpu(); 259721afaf18SBorislav Petkov mce_start_timer(t); 259821afaf18SBorislav Petkov return 0; 259921afaf18SBorislav Petkov } 260021afaf18SBorislav Petkov 260121afaf18SBorislav Petkov static int mce_cpu_pre_down(unsigned int cpu) 260221afaf18SBorislav Petkov { 260321afaf18SBorislav Petkov struct timer_list *t = this_cpu_ptr(&mce_timer); 260421afaf18SBorislav Petkov 260521afaf18SBorislav Petkov mce_disable_cpu(); 260621afaf18SBorislav Petkov del_timer_sync(t); 260721afaf18SBorislav Petkov mce_threshold_remove_device(cpu); 260821afaf18SBorislav Petkov mce_device_remove(cpu); 260921afaf18SBorislav Petkov return 0; 261021afaf18SBorislav Petkov } 261121afaf18SBorislav Petkov 261221afaf18SBorislav Petkov static __init void mce_init_banks(void) 261321afaf18SBorislav Petkov { 261421afaf18SBorislav Petkov int i; 261521afaf18SBorislav Petkov 2616b4914508SYazen Ghannam for (i = 0; i < MAX_NR_BANKS; i++) { 
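		/*
		 * Each bank gets exactly one device attribute, shared by all
		 * CPUs and named "bank<i>"; show_bank()/set_bank() resolve the
		 * CPU from the device that sysfs passes in.
		 */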
2617b4914508SYazen Ghannam struct mce_bank_dev *b = &mce_bank_devs[i]; 261821afaf18SBorislav Petkov struct device_attribute *a = &b->attr; 261921afaf18SBorislav Petkov 2620b4914508SYazen Ghannam b->bank = i; 2621b4914508SYazen Ghannam 262221afaf18SBorislav Petkov sysfs_attr_init(&a->attr); 262321afaf18SBorislav Petkov a->attr.name = b->attrname; 262421afaf18SBorislav Petkov snprintf(b->attrname, ATTR_LEN, "bank%d", i); 262521afaf18SBorislav Petkov 262621afaf18SBorislav Petkov a->attr.mode = 0644; 262721afaf18SBorislav Petkov a->show = show_bank; 262821afaf18SBorislav Petkov a->store = set_bank; 262921afaf18SBorislav Petkov } 263021afaf18SBorislav Petkov } 263121afaf18SBorislav Petkov 26326e7a41c6SThomas Gleixner /* 26336e7a41c6SThomas Gleixner * When running on XEN, this initcall is ordered against the XEN mcelog 26346e7a41c6SThomas Gleixner * initcall: 26356e7a41c6SThomas Gleixner * 26366e7a41c6SThomas Gleixner * device_initcall(xen_late_init_mcelog); 26376e7a41c6SThomas Gleixner * device_initcall_sync(mcheck_init_device); 26386e7a41c6SThomas Gleixner */ 263921afaf18SBorislav Petkov static __init int mcheck_init_device(void) 264021afaf18SBorislav Petkov { 264121afaf18SBorislav Petkov int err; 264221afaf18SBorislav Petkov 264321afaf18SBorislav Petkov /* 264421afaf18SBorislav Petkov * Check if we have a spare virtual bit. This will only become 264521afaf18SBorislav Petkov * a problem if/when we move beyond 5-level page tables. 264621afaf18SBorislav Petkov */ 264721afaf18SBorislav Petkov MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63); 264821afaf18SBorislav Petkov 264921afaf18SBorislav Petkov if (!mce_available(&boot_cpu_data)) { 265021afaf18SBorislav Petkov err = -EIO; 265121afaf18SBorislav Petkov goto err_out; 265221afaf18SBorislav Petkov } 265321afaf18SBorislav Petkov 265421afaf18SBorislav Petkov if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { 265521afaf18SBorislav Petkov err = -ENOMEM; 265621afaf18SBorislav Petkov goto err_out; 265721afaf18SBorislav Petkov } 265821afaf18SBorislav Petkov 265921afaf18SBorislav Petkov mce_init_banks(); 266021afaf18SBorislav Petkov 266121afaf18SBorislav Petkov err = subsys_system_register(&mce_subsys, NULL); 266221afaf18SBorislav Petkov if (err) 266321afaf18SBorislav Petkov goto err_out_mem; 266421afaf18SBorislav Petkov 266521afaf18SBorislav Petkov err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL, 266621afaf18SBorislav Petkov mce_cpu_dead); 266721afaf18SBorislav Petkov if (err) 266821afaf18SBorislav Petkov goto err_out_mem; 266921afaf18SBorislav Petkov 26706e7a41c6SThomas Gleixner /* 26716e7a41c6SThomas Gleixner * Invokes mce_cpu_online() on all CPUs which are online when 26726e7a41c6SThomas Gleixner * the state is installed. 
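 * On later hotplug events, mce_cpu_online() runs on each incoming CPU and
 * the teardown callback, mce_cpu_pre_down(), on each outgoing one.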
26736e7a41c6SThomas Gleixner */ 267421afaf18SBorislav Petkov err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", 267521afaf18SBorislav Petkov mce_cpu_online, mce_cpu_pre_down); 267621afaf18SBorislav Petkov if (err < 0) 267721afaf18SBorislav Petkov goto err_out_online; 267821afaf18SBorislav Petkov 267921afaf18SBorislav Petkov register_syscore_ops(&mce_syscore_ops); 268021afaf18SBorislav Petkov 268121afaf18SBorislav Petkov return 0; 268221afaf18SBorislav Petkov 268321afaf18SBorislav Petkov err_out_online: 268421afaf18SBorislav Petkov cpuhp_remove_state(CPUHP_X86_MCE_DEAD); 268521afaf18SBorislav Petkov 268621afaf18SBorislav Petkov err_out_mem: 268721afaf18SBorislav Petkov free_cpumask_var(mce_device_initialized); 268821afaf18SBorislav Petkov 268921afaf18SBorislav Petkov err_out: 269021afaf18SBorislav Petkov pr_err("Unable to init MCE device (rc: %d)\n", err); 269121afaf18SBorislav Petkov 269221afaf18SBorislav Petkov return err; 269321afaf18SBorislav Petkov } 269421afaf18SBorislav Petkov device_initcall_sync(mcheck_init_device); 269521afaf18SBorislav Petkov 269621afaf18SBorislav Petkov /* 269721afaf18SBorislav Petkov * Old style boot options parsing. Only for compatibility. 269821afaf18SBorislav Petkov */ 269921afaf18SBorislav Petkov static int __init mcheck_disable(char *str) 270021afaf18SBorislav Petkov { 270121afaf18SBorislav Petkov mca_cfg.disabled = 1; 270221afaf18SBorislav Petkov return 1; 270321afaf18SBorislav Petkov } 270421afaf18SBorislav Petkov __setup("nomce", mcheck_disable); 270521afaf18SBorislav Petkov 270621afaf18SBorislav Petkov #ifdef CONFIG_DEBUG_FS 270721afaf18SBorislav Petkov struct dentry *mce_get_debugfs_dir(void) 270821afaf18SBorislav Petkov { 270921afaf18SBorislav Petkov static struct dentry *dmce; 271021afaf18SBorislav Petkov 271121afaf18SBorislav Petkov if (!dmce) 271221afaf18SBorislav Petkov dmce = debugfs_create_dir("mce", NULL); 271321afaf18SBorislav Petkov 271421afaf18SBorislav Petkov return dmce; 271521afaf18SBorislav Petkov } 271621afaf18SBorislav Petkov 271721afaf18SBorislav Petkov static void mce_reset(void) 271821afaf18SBorislav Petkov { 271921afaf18SBorislav Petkov cpu_missing = 0; 272021afaf18SBorislav Petkov atomic_set(&mce_fake_panicked, 0); 272121afaf18SBorislav Petkov atomic_set(&mce_executing, 0); 272221afaf18SBorislav Petkov atomic_set(&mce_callin, 0); 272321afaf18SBorislav Petkov atomic_set(&global_nwo, 0); 272421afaf18SBorislav Petkov } 272521afaf18SBorislav Petkov 272621afaf18SBorislav Petkov static int fake_panic_get(void *data, u64 *val) 272721afaf18SBorislav Petkov { 272821afaf18SBorislav Petkov *val = fake_panic; 272921afaf18SBorislav Petkov return 0; 273021afaf18SBorislav Petkov } 273121afaf18SBorislav Petkov 273221afaf18SBorislav Petkov static int fake_panic_set(void *data, u64 val) 273321afaf18SBorislav Petkov { 273421afaf18SBorislav Petkov mce_reset(); 273521afaf18SBorislav Petkov fake_panic = val; 273621afaf18SBorislav Petkov return 0; 273721afaf18SBorislav Petkov } 273821afaf18SBorislav Petkov 273928156d76SYueHaibing DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set, 274028156d76SYueHaibing "%llu\n"); 274121afaf18SBorislav Petkov 27426e4f929eSGreg Kroah-Hartman static void __init mcheck_debugfs_init(void) 274321afaf18SBorislav Petkov { 27446e4f929eSGreg Kroah-Hartman struct dentry *dmce; 274521afaf18SBorislav Petkov 274621afaf18SBorislav Petkov dmce = mce_get_debugfs_dir(); 27476e4f929eSGreg Kroah-Hartman debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL, 27486e4f929eSGreg Kroah-Hartman 
&fake_panic_fops); 274921afaf18SBorislav Petkov } 275021afaf18SBorislav Petkov #else 27516e4f929eSGreg Kroah-Hartman static void __init mcheck_debugfs_init(void) { } 275221afaf18SBorislav Petkov #endif 275321afaf18SBorislav Petkov 275421afaf18SBorislav Petkov static int __init mcheck_late_init(void) 275521afaf18SBorislav Petkov { 275621afaf18SBorislav Petkov if (mca_cfg.recovery) 2757ec6347bbSDan Williams enable_copy_mc_fragile(); 275821afaf18SBorislav Petkov 275921afaf18SBorislav Petkov mcheck_debugfs_init(); 276021afaf18SBorislav Petkov 276121afaf18SBorislav Petkov /* 276221afaf18SBorislav Petkov * Flush out everything that has been logged during early boot, now that 276321afaf18SBorislav Petkov * everything has been initialized (workqueues, decoders, ...). 276421afaf18SBorislav Petkov */ 276521afaf18SBorislav Petkov mce_schedule_work(); 276621afaf18SBorislav Petkov 276721afaf18SBorislav Petkov return 0; 276821afaf18SBorislav Petkov } 276921afaf18SBorislav Petkov late_initcall(mcheck_late_init);
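/*
 * Example usage of the interfaces above. A sketch only; the paths assume
 * sysfs and debugfs are mounted in their conventional locations:
 *
 *   # Widen the subevent mask of bank 0 on all CPUs (triggers mce_restart()):
 *   echo 0xffffffffffffffff > /sys/devices/system/machinecheck/machinecheck0/bank0
 *
 *   # Change the corrected-error poll interval to 30 seconds:
 *   echo 30 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *
 *   # Ignore corrected errors entirely (stops CMCI and the poll timer):
 *   echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *
 *   # Make the MCE panic path log instead of panicking, for testing
 *   # (the file is created 0444, so this needs CAP_DAC_OVERRIDE, i.e. root):
 *   echo 1 > /sys/kernel/debug/mce/fake_panic
 *
 * Booting with "nomce" on the kernel command line (mcheck_disable() above)
 * disables machine check handling altogether.
 */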