1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Machine check handler. 4 * 5 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. 6 * Rest from unknown author(s). 7 * 2004 Andi Kleen. Rewrote most of it. 8 * Copyright 2008 Intel Corporation 9 * Author: Andi Kleen 10 */ 11 12 #include <linux/thread_info.h> 13 #include <linux/capability.h> 14 #include <linux/miscdevice.h> 15 #include <linux/ratelimit.h> 16 #include <linux/rcupdate.h> 17 #include <linux/kobject.h> 18 #include <linux/uaccess.h> 19 #include <linux/kdebug.h> 20 #include <linux/kernel.h> 21 #include <linux/percpu.h> 22 #include <linux/string.h> 23 #include <linux/device.h> 24 #include <linux/syscore_ops.h> 25 #include <linux/delay.h> 26 #include <linux/ctype.h> 27 #include <linux/sched.h> 28 #include <linux/sysfs.h> 29 #include <linux/types.h> 30 #include <linux/slab.h> 31 #include <linux/init.h> 32 #include <linux/kmod.h> 33 #include <linux/poll.h> 34 #include <linux/nmi.h> 35 #include <linux/cpu.h> 36 #include <linux/ras.h> 37 #include <linux/smp.h> 38 #include <linux/fs.h> 39 #include <linux/mm.h> 40 #include <linux/debugfs.h> 41 #include <linux/irq_work.h> 42 #include <linux/export.h> 43 #include <linux/set_memory.h> 44 #include <linux/sync_core.h> 45 #include <linux/task_work.h> 46 #include <linux/hardirq.h> 47 48 #include <asm/intel-family.h> 49 #include <asm/processor.h> 50 #include <asm/traps.h> 51 #include <asm/tlbflush.h> 52 #include <asm/mce.h> 53 #include <asm/msr.h> 54 #include <asm/reboot.h> 55 56 #include "internal.h" 57 58 /* sysfs synchronization */ 59 static DEFINE_MUTEX(mce_sysfs_mutex); 60 61 #define CREATE_TRACE_POINTS 62 #include <trace/events/mce.h> 63 64 #define SPINUNIT 100 /* 100ns */ 65 66 DEFINE_PER_CPU(unsigned, mce_exception_count); 67 68 DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks); 69 70 struct mce_bank { 71 u64 ctl; /* subevents to enable */ 72 bool init; /* initialise bank? */ 73 }; 74 static DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array); 75 76 #define ATTR_LEN 16 77 /* One object for each MCE bank, shared by all CPUs */ 78 struct mce_bank_dev { 79 struct device_attribute attr; /* device attribute */ 80 char attrname[ATTR_LEN]; /* attribute name */ 81 u8 bank; /* bank number */ 82 }; 83 static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS]; 84 85 struct mce_vendor_flags mce_flags __read_mostly; 86 87 struct mca_config mca_cfg __read_mostly = { 88 .bootlog = -1, 89 /* 90 * Tolerant levels: 91 * 0: always panic on uncorrected errors, log corrected errors 92 * 1: panic or SIGBUS on uncorrected errors, log corrected errors 93 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors 94 * 3: never panic or SIGBUS, log all errors (for testing only) 95 */ 96 .tolerant = 1, 97 .monarch_timeout = -1 98 }; 99 100 static DEFINE_PER_CPU(struct mce, mces_seen); 101 static unsigned long mce_need_notify; 102 103 /* 104 * MCA banks polled by the period polling timer for corrected events. 105 * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). 106 */ 107 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { 108 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL 109 }; 110 111 /* 112 * MCA banks controlled through firmware first for corrected errors. 113 * This is a global list of banks for which we won't enable CMCI and we 114 * won't poll. Firmware controls these banks and is responsible for 115 * reporting corrected errors through GHES. Uncorrected/recoverable 116 * errors are still notified through a machine check. 
117 */ 118 mce_banks_t mce_banks_ce_disabled; 119 120 static struct work_struct mce_work; 121 static struct irq_work mce_irq_work; 122 123 /* 124 * CPU/chipset specific EDAC code can register a notifier call here to print 125 * MCE errors in a human-readable form. 126 */ 127 BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain); 128 129 /* Do initial initialization of a struct mce */ 130 void mce_setup(struct mce *m) 131 { 132 memset(m, 0, sizeof(struct mce)); 133 m->cpu = m->extcpu = smp_processor_id(); 134 /* need the internal __ version to avoid deadlocks */ 135 m->time = __ktime_get_real_seconds(); 136 m->cpuvendor = boot_cpu_data.x86_vendor; 137 m->cpuid = cpuid_eax(1); 138 m->socketid = cpu_data(m->extcpu).phys_proc_id; 139 m->apicid = cpu_data(m->extcpu).initial_apicid; 140 m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP); 141 142 if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) 143 m->ppin = __rdmsr(MSR_PPIN); 144 else if (this_cpu_has(X86_FEATURE_AMD_PPIN)) 145 m->ppin = __rdmsr(MSR_AMD_PPIN); 146 147 m->microcode = boot_cpu_data.microcode; 148 } 149 150 DEFINE_PER_CPU(struct mce, injectm); 151 EXPORT_PER_CPU_SYMBOL_GPL(injectm); 152 153 void mce_log(struct mce *m) 154 { 155 if (!mce_gen_pool_add(m)) 156 irq_work_queue(&mce_irq_work); 157 } 158 EXPORT_SYMBOL_GPL(mce_log); 159 160 void mce_register_decode_chain(struct notifier_block *nb) 161 { 162 if (WARN_ON(nb->priority < MCE_PRIO_LOWEST || 163 nb->priority > MCE_PRIO_HIGHEST)) 164 return; 165 166 blocking_notifier_chain_register(&x86_mce_decoder_chain, nb); 167 } 168 EXPORT_SYMBOL_GPL(mce_register_decode_chain); 169 170 void mce_unregister_decode_chain(struct notifier_block *nb) 171 { 172 blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb); 173 } 174 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain); 175 176 u32 mca_msr_reg(int bank, enum mca_msr reg) 177 { 178 if (mce_flags.smca) { 179 switch (reg) { 180 case MCA_CTL: return MSR_AMD64_SMCA_MCx_CTL(bank); 181 case MCA_ADDR: return MSR_AMD64_SMCA_MCx_ADDR(bank); 182 case MCA_MISC: return MSR_AMD64_SMCA_MCx_MISC(bank); 183 case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank); 184 } 185 } 186 187 switch (reg) { 188 case MCA_CTL: return MSR_IA32_MCx_CTL(bank); 189 case MCA_ADDR: return MSR_IA32_MCx_ADDR(bank); 190 case MCA_MISC: return MSR_IA32_MCx_MISC(bank); 191 case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank); 192 } 193 194 return 0; 195 } 196 197 static void __print_mce(struct mce *m) 198 { 199 pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n", 200 m->extcpu, 201 (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""), 202 m->mcgstatus, m->bank, m->status); 203 204 if (m->ip) { 205 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ", 206 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", 207 m->cs, m->ip); 208 209 if (m->cs == __KERNEL_CS) 210 pr_cont("{%pS}", (void *)(unsigned long)m->ip); 211 pr_cont("\n"); 212 } 213 214 pr_emerg(HW_ERR "TSC %llx ", m->tsc); 215 if (m->addr) 216 pr_cont("ADDR %llx ", m->addr); 217 if (m->misc) 218 pr_cont("MISC %llx ", m->misc); 219 if (m->ppin) 220 pr_cont("PPIN %llx ", m->ppin); 221 222 if (mce_flags.smca) { 223 if (m->synd) 224 pr_cont("SYND %llx ", m->synd); 225 if (m->ipid) 226 pr_cont("IPID %llx ", m->ipid); 227 } 228 229 pr_cont("\n"); 230 231 /* 232 * Note this output is parsed by external tools and old fields 233 * should not be changed. 
234 */ 235 pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", 236 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, 237 m->microcode); 238 } 239 240 static void print_mce(struct mce *m) 241 { 242 __print_mce(m); 243 244 if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON) 245 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); 246 } 247 248 #define PANIC_TIMEOUT 5 /* 5 seconds */ 249 250 static atomic_t mce_panicked; 251 252 static int fake_panic; 253 static atomic_t mce_fake_panicked; 254 255 /* Panic in progress. Enable interrupts and wait for final IPI */ 256 static void wait_for_panic(void) 257 { 258 long timeout = PANIC_TIMEOUT*USEC_PER_SEC; 259 260 preempt_disable(); 261 local_irq_enable(); 262 while (timeout-- > 0) 263 udelay(1); 264 if (panic_timeout == 0) 265 panic_timeout = mca_cfg.panic_timeout; 266 panic("Panicing machine check CPU died"); 267 } 268 269 static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) 270 { 271 struct llist_node *pending; 272 struct mce_evt_llist *l; 273 int apei_err = 0; 274 275 /* 276 * Allow instrumentation around external facilities usage. Not that it 277 * matters a whole lot since the machine is going to panic anyway. 278 */ 279 instrumentation_begin(); 280 281 if (!fake_panic) { 282 /* 283 * Make sure only one CPU runs in machine check panic 284 */ 285 if (atomic_inc_return(&mce_panicked) > 1) 286 wait_for_panic(); 287 barrier(); 288 289 bust_spinlocks(1); 290 console_verbose(); 291 } else { 292 /* Don't log too much for fake panic */ 293 if (atomic_inc_return(&mce_fake_panicked) > 1) 294 goto out; 295 } 296 pending = mce_gen_pool_prepare_records(); 297 /* First print corrected ones that are still unlogged */ 298 llist_for_each_entry(l, pending, llnode) { 299 struct mce *m = &l->mce; 300 if (!(m->status & MCI_STATUS_UC)) { 301 print_mce(m); 302 if (!apei_err) 303 apei_err = apei_write_mce(m); 304 } 305 } 306 /* Now print uncorrected but with the final one last */ 307 llist_for_each_entry(l, pending, llnode) { 308 struct mce *m = &l->mce; 309 if (!(m->status & MCI_STATUS_UC)) 310 continue; 311 if (!final || mce_cmp(m, final)) { 312 print_mce(m); 313 if (!apei_err) 314 apei_err = apei_write_mce(m); 315 } 316 } 317 if (final) { 318 print_mce(final); 319 if (!apei_err) 320 apei_err = apei_write_mce(final); 321 } 322 if (exp) 323 pr_emerg(HW_ERR "Machine check: %s\n", exp); 324 if (!fake_panic) { 325 if (panic_timeout == 0) 326 panic_timeout = mca_cfg.panic_timeout; 327 panic(msg); 328 } else 329 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); 330 331 out: 332 instrumentation_end(); 333 } 334 335 /* Support code for software error injection */ 336 337 static int msr_to_offset(u32 msr) 338 { 339 unsigned bank = __this_cpu_read(injectm.bank); 340 341 if (msr == mca_cfg.rip_msr) 342 return offsetof(struct mce, ip); 343 if (msr == mca_msr_reg(bank, MCA_STATUS)) 344 return offsetof(struct mce, status); 345 if (msr == mca_msr_reg(bank, MCA_ADDR)) 346 return offsetof(struct mce, addr); 347 if (msr == mca_msr_reg(bank, MCA_MISC)) 348 return offsetof(struct mce, misc); 349 if (msr == MSR_IA32_MCG_STATUS) 350 return offsetof(struct mce, mcgstatus); 351 return -1; 352 } 353 354 void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr) 355 { 356 if (wrmsr) { 357 pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n", 358 (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax, 359 regs->ip, (void *)regs->ip); 360 } 
else { 361 pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n", 362 (unsigned int)regs->cx, regs->ip, (void *)regs->ip); 363 } 364 365 show_stack_regs(regs); 366 367 panic("MCA architectural violation!\n"); 368 369 while (true) 370 cpu_relax(); 371 } 372 373 /* MSR access wrappers used for error injection */ 374 noinstr u64 mce_rdmsrl(u32 msr) 375 { 376 DECLARE_ARGS(val, low, high); 377 378 if (__this_cpu_read(injectm.finished)) { 379 int offset; 380 u64 ret; 381 382 instrumentation_begin(); 383 384 offset = msr_to_offset(msr); 385 if (offset < 0) 386 ret = 0; 387 else 388 ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset); 389 390 instrumentation_end(); 391 392 return ret; 393 } 394 395 /* 396 * RDMSR on MCA MSRs should not fault. If they do, this is very much an 397 * architectural violation and needs to be reported to hw vendor. Panic 398 * the box to not allow any further progress. 399 */ 400 asm volatile("1: rdmsr\n" 401 "2:\n" 402 _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE) 403 : EAX_EDX_RET(val, low, high) : "c" (msr)); 404 405 406 return EAX_EDX_VAL(val, low, high); 407 } 408 409 static noinstr void mce_wrmsrl(u32 msr, u64 v) 410 { 411 u32 low, high; 412 413 if (__this_cpu_read(injectm.finished)) { 414 int offset; 415 416 instrumentation_begin(); 417 418 offset = msr_to_offset(msr); 419 if (offset >= 0) 420 *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v; 421 422 instrumentation_end(); 423 424 return; 425 } 426 427 low = (u32)v; 428 high = (u32)(v >> 32); 429 430 /* See comment in mce_rdmsrl() */ 431 asm volatile("1: wrmsr\n" 432 "2:\n" 433 _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE) 434 : : "c" (msr), "a"(low), "d" (high) : "memory"); 435 } 436 437 /* 438 * Collect all global (w.r.t. this processor) status about this machine 439 * check into our "mce" struct so that we can use it later to assess 440 * the severity of the problem as we read per-bank specific details. 441 */ 442 static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs) 443 { 444 /* 445 * Enable instrumentation around mce_setup() which calls external 446 * facilities. 447 */ 448 instrumentation_begin(); 449 mce_setup(m); 450 instrumentation_end(); 451 452 m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); 453 if (regs) { 454 /* 455 * Get the address of the instruction at the time of 456 * the machine check error. 457 */ 458 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) { 459 m->ip = regs->ip; 460 m->cs = regs->cs; 461 462 /* 463 * When in VM86 mode make the cs look like ring 3 464 * always. This is a lie, but it's better than passing 465 * the additional vm86 bit around everywhere. 466 */ 467 if (v8086_mode(regs)) 468 m->cs |= 3; 469 } 470 /* Use accurate RIP reporting if available. */ 471 if (mca_cfg.rip_msr) 472 m->ip = mce_rdmsrl(mca_cfg.rip_msr); 473 } 474 } 475 476 int mce_available(struct cpuinfo_x86 *c) 477 { 478 if (mca_cfg.disabled) 479 return 0; 480 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); 481 } 482 483 static void mce_schedule_work(void) 484 { 485 if (!mce_gen_pool_empty()) 486 schedule_work(&mce_work); 487 } 488 489 static void mce_irq_work_cb(struct irq_work *entry) 490 { 491 mce_schedule_work(); 492 } 493 494 /* 495 * Check if the address reported by the CPU is in a format we can parse. 496 * It would be possible to add code for most other cases, but all would 497 * be somewhat complicated (e.g. segment offset would require an instruction 498 * parser). So only support physical addresses up to page granularity for now. 
499 */ 500 int mce_usable_address(struct mce *m) 501 { 502 if (!(m->status & MCI_STATUS_ADDRV)) 503 return 0; 504 505 /* Checks after this one are Intel/Zhaoxin-specific: */ 506 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && 507 boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) 508 return 1; 509 510 if (!(m->status & MCI_STATUS_MISCV)) 511 return 0; 512 513 if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT) 514 return 0; 515 516 if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS) 517 return 0; 518 519 return 1; 520 } 521 EXPORT_SYMBOL_GPL(mce_usable_address); 522 523 bool mce_is_memory_error(struct mce *m) 524 { 525 switch (m->cpuvendor) { 526 case X86_VENDOR_AMD: 527 case X86_VENDOR_HYGON: 528 return amd_mce_is_memory_error(m); 529 530 case X86_VENDOR_INTEL: 531 case X86_VENDOR_ZHAOXIN: 532 /* 533 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes 534 * 535 * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for 536 * indicating a memory error. Bit 8 is used for indicating a 537 * cache hierarchy error. The combination of bit 2 and bit 3 538 * is used for indicating a `generic' cache hierarchy error 539 * But we can't just blindly check the above bits, because if 540 * bit 11 is set, then it is a bus/interconnect error - and 541 * either way the above bits just gives more detail on what 542 * bus/interconnect error happened. Note that bit 12 can be 543 * ignored, as it's the "filter" bit. 544 */ 545 return (m->status & 0xef80) == BIT(7) || 546 (m->status & 0xef00) == BIT(8) || 547 (m->status & 0xeffc) == 0xc; 548 549 default: 550 return false; 551 } 552 } 553 EXPORT_SYMBOL_GPL(mce_is_memory_error); 554 555 static bool whole_page(struct mce *m) 556 { 557 if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV)) 558 return true; 559 560 return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT; 561 } 562 563 bool mce_is_correctable(struct mce *m) 564 { 565 if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) 566 return false; 567 568 if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED) 569 return false; 570 571 if (m->status & MCI_STATUS_UC) 572 return false; 573 574 return true; 575 } 576 EXPORT_SYMBOL_GPL(mce_is_correctable); 577 578 static int mce_early_notifier(struct notifier_block *nb, unsigned long val, 579 void *data) 580 { 581 struct mce *m = (struct mce *)data; 582 583 if (!m) 584 return NOTIFY_DONE; 585 586 /* Emit the trace record: */ 587 trace_mce_record(m); 588 589 set_bit(0, &mce_need_notify); 590 591 mce_notify_irq(); 592 593 return NOTIFY_DONE; 594 } 595 596 static struct notifier_block early_nb = { 597 .notifier_call = mce_early_notifier, 598 .priority = MCE_PRIO_EARLY, 599 }; 600 601 static int uc_decode_notifier(struct notifier_block *nb, unsigned long val, 602 void *data) 603 { 604 struct mce *mce = (struct mce *)data; 605 unsigned long pfn; 606 607 if (!mce || !mce_usable_address(mce)) 608 return NOTIFY_DONE; 609 610 if (mce->severity != MCE_AO_SEVERITY && 611 mce->severity != MCE_DEFERRED_SEVERITY) 612 return NOTIFY_DONE; 613 614 pfn = mce->addr >> PAGE_SHIFT; 615 if (!memory_failure(pfn, 0)) { 616 set_mce_nospec(pfn, whole_page(mce)); 617 mce->kflags |= MCE_HANDLED_UC; 618 } 619 620 return NOTIFY_OK; 621 } 622 623 static struct notifier_block mce_uc_nb = { 624 .notifier_call = uc_decode_notifier, 625 .priority = MCE_PRIO_UC, 626 }; 627 628 static int mce_default_notifier(struct notifier_block *nb, unsigned long val, 629 void *data) 630 { 631 struct mce *m = (struct mce *)data; 632 633 if (!m) 634 return NOTIFY_DONE; 635 636 if 
(mca_cfg.print_all || !m->kflags) 637 __print_mce(m); 638 639 return NOTIFY_DONE; 640 } 641 642 static struct notifier_block mce_default_nb = { 643 .notifier_call = mce_default_notifier, 644 /* lowest prio, we want it to run last. */ 645 .priority = MCE_PRIO_LOWEST, 646 }; 647 648 /* 649 * Read ADDR and MISC registers. 650 */ 651 static noinstr void mce_read_aux(struct mce *m, int i) 652 { 653 if (m->status & MCI_STATUS_MISCV) 654 m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC)); 655 656 if (m->status & MCI_STATUS_ADDRV) { 657 m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR)); 658 659 /* 660 * Mask the reported address by the reported granularity. 661 */ 662 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) { 663 u8 shift = MCI_MISC_ADDR_LSB(m->misc); 664 m->addr >>= shift; 665 m->addr <<= shift; 666 } 667 668 /* 669 * Extract [55:<lsb>] where lsb is the least significant 670 * *valid* bit of the address bits. 671 */ 672 if (mce_flags.smca) { 673 u8 lsb = (m->addr >> 56) & 0x3f; 674 675 m->addr &= GENMASK_ULL(55, lsb); 676 } 677 } 678 679 if (mce_flags.smca) { 680 m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i)); 681 682 if (m->status & MCI_STATUS_SYNDV) 683 m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i)); 684 } 685 } 686 687 DEFINE_PER_CPU(unsigned, mce_poll_count); 688 689 /* 690 * Poll for corrected events or events that happened before reset. 691 * Those are just logged through /dev/mcelog. 692 * 693 * This is executed in standard interrupt context. 694 * 695 * Note: spec recommends to panic for fatal unsignalled 696 * errors here. However this would be quite problematic -- 697 * we would need to reimplement the Monarch handling and 698 * it would mess up the exclusion between exception handler 699 * and poll handler -- * so we skip this for now. 700 * These cases should not happen anyways, or only when the CPU 701 * is already totally * confused. In this case it's likely it will 702 * not fully execute the machine check handler either. 703 */ 704 bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) 705 { 706 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 707 bool error_seen = false; 708 struct mce m; 709 int i; 710 711 this_cpu_inc(mce_poll_count); 712 713 mce_gather_info(&m, NULL); 714 715 if (flags & MCP_TIMESTAMP) 716 m.tsc = rdtsc(); 717 718 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 719 if (!mce_banks[i].ctl || !test_bit(i, *b)) 720 continue; 721 722 m.misc = 0; 723 m.addr = 0; 724 m.bank = i; 725 726 barrier(); 727 m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); 728 729 /* If this entry is not valid, ignore it */ 730 if (!(m.status & MCI_STATUS_VAL)) 731 continue; 732 733 /* 734 * If we are logging everything (at CPU online) or this 735 * is a corrected error, then we must log it. 736 */ 737 if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC)) 738 goto log_it; 739 740 /* 741 * Newer Intel systems that support software error 742 * recovery need to make additional checks. Other 743 * CPUs should skip over uncorrected errors, but log 744 * everything else. 745 */ 746 if (!mca_cfg.ser) { 747 if (m.status & MCI_STATUS_UC) 748 continue; 749 goto log_it; 750 } 751 752 /* Log "not enabled" (speculative) errors */ 753 if (!(m.status & MCI_STATUS_EN)) 754 goto log_it; 755 756 /* 757 * Log UCNA (SDM: 15.6.3 "UCR Error Classification") 758 * UC == 1 && PCC == 0 && S == 0 759 */ 760 if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S)) 761 goto log_it; 762 763 /* 764 * Skip anything else. 
Presumption is that our read of this 765 * bank is racing with a machine check. Leave the log alone 766 * for do_machine_check() to deal with it. 767 */ 768 continue; 769 770 log_it: 771 error_seen = true; 772 773 if (flags & MCP_DONTLOG) 774 goto clear_it; 775 776 mce_read_aux(&m, i); 777 m.severity = mce_severity(&m, NULL, mca_cfg.tolerant, NULL, false); 778 /* 779 * Don't get the IP here because it's unlikely to 780 * have anything to do with the actual error location. 781 */ 782 783 if (mca_cfg.dont_log_ce && !mce_usable_address(&m)) 784 goto clear_it; 785 786 if (flags & MCP_QUEUE_LOG) 787 mce_gen_pool_add(&m); 788 else 789 mce_log(&m); 790 791 clear_it: 792 /* 793 * Clear state for this bank. 794 */ 795 mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 796 } 797 798 /* 799 * Don't clear MCG_STATUS here because it's only defined for 800 * exceptions. 801 */ 802 803 sync_core(); 804 805 return error_seen; 806 } 807 EXPORT_SYMBOL_GPL(machine_check_poll); 808 809 /* 810 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and 811 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM 812 * Vol 3B Table 15-20). But this confuses both the code that determines 813 * whether the machine check occurred in kernel or user mode, and also 814 * the severity assessment code. Pretend that EIPV was set, and take the 815 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. 816 */ 817 static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) 818 { 819 if (bank != 0) 820 return; 821 if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) 822 return; 823 if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| 824 MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| 825 MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| 826 MCACOD)) != 827 (MCI_STATUS_UC|MCI_STATUS_EN| 828 MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| 829 MCI_STATUS_AR|MCACOD_INSTR)) 830 return; 831 832 m->mcgstatus |= MCG_STATUS_EIPV; 833 m->ip = regs->ip; 834 m->cs = regs->cs; 835 } 836 837 /* 838 * Do a quick check if any of the events requires a panic. 839 * This decides if we keep the events around or clear them. 840 */ 841 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, 842 struct pt_regs *regs) 843 { 844 char *tmp = *msg; 845 int i; 846 847 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 848 m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); 849 if (!(m->status & MCI_STATUS_VAL)) 850 continue; 851 852 __set_bit(i, validp); 853 if (mce_flags.snb_ifu_quirk) 854 quirk_sandybridge_ifu(i, m, regs); 855 856 m->bank = i; 857 if (mce_severity(m, regs, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { 858 mce_read_aux(m, i); 859 *msg = tmp; 860 return 1; 861 } 862 } 863 return 0; 864 } 865 866 /* 867 * Variable to establish order between CPUs while scanning. 868 * Each CPU spins initially until executing is equal its number. 869 */ 870 static atomic_t mce_executing; 871 872 /* 873 * Defines order of CPUs on entry. First CPU becomes Monarch. 874 */ 875 static atomic_t mce_callin; 876 877 /* 878 * Track which CPUs entered the MCA broadcast synchronization and which not in 879 * order to print holdouts. 880 */ 881 static cpumask_t mce_missing_cpus = CPU_MASK_ALL; 882 883 /* 884 * Check if a timeout waiting for other CPUs happened. 
885 */ 886 static noinstr int mce_timed_out(u64 *t, const char *msg) 887 { 888 int ret = 0; 889 890 /* Enable instrumentation around calls to external facilities */ 891 instrumentation_begin(); 892 893 /* 894 * The others already did panic for some reason. 895 * Bail out like in a timeout. 896 * rmb() to tell the compiler that system_state 897 * might have been modified by someone else. 898 */ 899 rmb(); 900 if (atomic_read(&mce_panicked)) 901 wait_for_panic(); 902 if (!mca_cfg.monarch_timeout) 903 goto out; 904 if ((s64)*t < SPINUNIT) { 905 if (mca_cfg.tolerant <= 1) { 906 if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus)) 907 pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n", 908 cpumask_pr_args(&mce_missing_cpus)); 909 mce_panic(msg, NULL, NULL); 910 } 911 ret = 1; 912 goto out; 913 } 914 *t -= SPINUNIT; 915 916 out: 917 touch_nmi_watchdog(); 918 919 instrumentation_end(); 920 921 return ret; 922 } 923 924 /* 925 * The Monarch's reign. The Monarch is the CPU who entered 926 * the machine check handler first. It waits for the others to 927 * raise the exception too and then grades them. When any 928 * error is fatal panic. Only then let the others continue. 929 * 930 * The other CPUs entering the MCE handler will be controlled by the 931 * Monarch. They are called Subjects. 932 * 933 * This way we prevent any potential data corruption in a unrecoverable case 934 * and also makes sure always all CPU's errors are examined. 935 * 936 * Also this detects the case of a machine check event coming from outer 937 * space (not detected by any CPUs) In this case some external agent wants 938 * us to shut down, so panic too. 939 * 940 * The other CPUs might still decide to panic if the handler happens 941 * in a unrecoverable place, but in this case the system is in a semi-stable 942 * state and won't corrupt anything by itself. It's ok to let the others 943 * continue for a bit first. 944 * 945 * All the spin loops have timeouts; when a timeout happens a CPU 946 * typically elects itself to be Monarch. 947 */ 948 static void mce_reign(void) 949 { 950 int cpu; 951 struct mce *m = NULL; 952 int global_worst = 0; 953 char *msg = NULL; 954 955 /* 956 * This CPU is the Monarch and the other CPUs have run 957 * through their handlers. 958 * Grade the severity of the errors of all the CPUs. 959 */ 960 for_each_possible_cpu(cpu) { 961 struct mce *mtmp = &per_cpu(mces_seen, cpu); 962 963 if (mtmp->severity > global_worst) { 964 global_worst = mtmp->severity; 965 m = &per_cpu(mces_seen, cpu); 966 } 967 } 968 969 /* 970 * Cannot recover? Panic here then. 971 * This dumps all the mces in the log buffer and stops the 972 * other CPUs. 973 */ 974 if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { 975 /* call mce_severity() to get "msg" for panic */ 976 mce_severity(m, NULL, mca_cfg.tolerant, &msg, true); 977 mce_panic("Fatal machine check", m, msg); 978 } 979 980 /* 981 * For UC somewhere we let the CPU who detects it handle it. 982 * Also must let continue the others, otherwise the handling 983 * CPU could deadlock on a lock. 984 */ 985 986 /* 987 * No machine check event found. Must be some external 988 * source or one CPU is hung. Panic. 989 */ 990 if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) 991 mce_panic("Fatal machine check from unknown source", NULL, NULL); 992 993 /* 994 * Now clear all the mces_seen so that they don't reappear on 995 * the next mce. 
996 */ 997 for_each_possible_cpu(cpu) 998 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); 999 } 1000 1001 static atomic_t global_nwo; 1002 1003 /* 1004 * Start of Monarch synchronization. This waits until all CPUs have 1005 * entered the exception handler and then determines if any of them 1006 * saw a fatal event that requires panic. Then it executes them 1007 * in the entry order. 1008 * TBD double check parallel CPU hotunplug 1009 */ 1010 static noinstr int mce_start(int *no_way_out) 1011 { 1012 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 1013 int order, ret = -1; 1014 1015 if (!timeout) 1016 return ret; 1017 1018 atomic_add(*no_way_out, &global_nwo); 1019 /* 1020 * Rely on the implied barrier below, such that global_nwo 1021 * is updated before mce_callin. 1022 */ 1023 order = atomic_inc_return(&mce_callin); 1024 cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus); 1025 1026 /* Enable instrumentation around calls to external facilities */ 1027 instrumentation_begin(); 1028 1029 /* 1030 * Wait for everyone. 1031 */ 1032 while (atomic_read(&mce_callin) != num_online_cpus()) { 1033 if (mce_timed_out(&timeout, 1034 "Timeout: Not all CPUs entered broadcast exception handler")) { 1035 atomic_set(&global_nwo, 0); 1036 goto out; 1037 } 1038 ndelay(SPINUNIT); 1039 } 1040 1041 /* 1042 * mce_callin should be read before global_nwo 1043 */ 1044 smp_rmb(); 1045 1046 if (order == 1) { 1047 /* 1048 * Monarch: Starts executing now, the others wait. 1049 */ 1050 atomic_set(&mce_executing, 1); 1051 } else { 1052 /* 1053 * Subject: Now start the scanning loop one by one in 1054 * the original callin order. 1055 * This way when there are any shared banks it will be 1056 * only seen by one CPU before cleared, avoiding duplicates. 1057 */ 1058 while (atomic_read(&mce_executing) < order) { 1059 if (mce_timed_out(&timeout, 1060 "Timeout: Subject CPUs unable to finish machine check processing")) { 1061 atomic_set(&global_nwo, 0); 1062 goto out; 1063 } 1064 ndelay(SPINUNIT); 1065 } 1066 } 1067 1068 /* 1069 * Cache the global no_way_out state. 1070 */ 1071 *no_way_out = atomic_read(&global_nwo); 1072 1073 ret = order; 1074 1075 out: 1076 instrumentation_end(); 1077 1078 return ret; 1079 } 1080 1081 /* 1082 * Synchronize between CPUs after main scanning loop. 1083 * This invokes the bulk of the Monarch processing. 1084 */ 1085 static noinstr int mce_end(int order) 1086 { 1087 u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; 1088 int ret = -1; 1089 1090 /* Allow instrumentation around external facilities. */ 1091 instrumentation_begin(); 1092 1093 if (!timeout) 1094 goto reset; 1095 if (order < 0) 1096 goto reset; 1097 1098 /* 1099 * Allow others to run. 1100 */ 1101 atomic_inc(&mce_executing); 1102 1103 if (order == 1) { 1104 /* 1105 * Monarch: Wait for everyone to go through their scanning 1106 * loops. 1107 */ 1108 while (atomic_read(&mce_executing) <= num_online_cpus()) { 1109 if (mce_timed_out(&timeout, 1110 "Timeout: Monarch CPU unable to finish machine check processing")) 1111 goto reset; 1112 ndelay(SPINUNIT); 1113 } 1114 1115 mce_reign(); 1116 barrier(); 1117 ret = 0; 1118 } else { 1119 /* 1120 * Subject: Wait for Monarch to finish. 1121 */ 1122 while (atomic_read(&mce_executing) != 0) { 1123 if (mce_timed_out(&timeout, 1124 "Timeout: Monarch CPU did not finish machine check processing")) 1125 goto reset; 1126 ndelay(SPINUNIT); 1127 } 1128 1129 /* 1130 * Don't reset anything. That's done by the Monarch. 
1131 */ 1132 ret = 0; 1133 goto out; 1134 } 1135 1136 /* 1137 * Reset all global state. 1138 */ 1139 reset: 1140 atomic_set(&global_nwo, 0); 1141 atomic_set(&mce_callin, 0); 1142 cpumask_setall(&mce_missing_cpus); 1143 barrier(); 1144 1145 /* 1146 * Let others run again. 1147 */ 1148 atomic_set(&mce_executing, 0); 1149 1150 out: 1151 instrumentation_end(); 1152 1153 return ret; 1154 } 1155 1156 static void mce_clear_state(unsigned long *toclear) 1157 { 1158 int i; 1159 1160 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1161 if (test_bit(i, toclear)) 1162 mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 1163 } 1164 } 1165 1166 /* 1167 * Cases where we avoid rendezvous handler timeout: 1168 * 1) If this CPU is offline. 1169 * 1170 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to 1171 * skip those CPUs which remain looping in the 1st kernel - see 1172 * crash_nmi_callback(). 1173 * 1174 * Note: there still is a small window between kexec-ing and the new, 1175 * kdump kernel establishing a new #MC handler where a broadcasted MCE 1176 * might not get handled properly. 1177 */ 1178 static noinstr bool mce_check_crashing_cpu(void) 1179 { 1180 unsigned int cpu = smp_processor_id(); 1181 1182 if (arch_cpu_is_offline(cpu) || 1183 (crashing_cpu != -1 && crashing_cpu != cpu)) { 1184 u64 mcgstatus; 1185 1186 mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); 1187 1188 if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { 1189 if (mcgstatus & MCG_STATUS_LMCES) 1190 return false; 1191 } 1192 1193 if (mcgstatus & MCG_STATUS_RIPV) { 1194 __wrmsr(MSR_IA32_MCG_STATUS, 0, 0); 1195 return true; 1196 } 1197 } 1198 return false; 1199 } 1200 1201 static __always_inline int 1202 __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final, 1203 unsigned long *toclear, unsigned long *valid_banks, int no_way_out, 1204 int *worst) 1205 { 1206 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1207 struct mca_config *cfg = &mca_cfg; 1208 int severity, i, taint = 0; 1209 1210 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1211 __clear_bit(i, toclear); 1212 if (!test_bit(i, valid_banks)) 1213 continue; 1214 1215 if (!mce_banks[i].ctl) 1216 continue; 1217 1218 m->misc = 0; 1219 m->addr = 0; 1220 m->bank = i; 1221 1222 m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS)); 1223 if (!(m->status & MCI_STATUS_VAL)) 1224 continue; 1225 1226 /* 1227 * Corrected or non-signaled errors are handled by 1228 * machine_check_poll(). Leave them alone, unless this panics. 1229 */ 1230 if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && 1231 !no_way_out) 1232 continue; 1233 1234 /* Set taint even when machine check was not enabled. */ 1235 taint++; 1236 1237 severity = mce_severity(m, regs, cfg->tolerant, NULL, true); 1238 1239 /* 1240 * When machine check was for corrected/deferred handler don't 1241 * touch, unless we're panicking. 1242 */ 1243 if ((severity == MCE_KEEP_SEVERITY || 1244 severity == MCE_UCNA_SEVERITY) && !no_way_out) 1245 continue; 1246 1247 __set_bit(i, toclear); 1248 1249 /* Machine check event was not enabled. Clear, but ignore. */ 1250 if (severity == MCE_NO_SEVERITY) 1251 continue; 1252 1253 mce_read_aux(m, i); 1254 1255 /* assuming valid severity level != 0 */ 1256 m->severity = severity; 1257 1258 /* 1259 * Enable instrumentation around the mce_log() call which is 1260 * done in #MC context, where instrumentation is disabled. 
1261 */ 1262 instrumentation_begin(); 1263 mce_log(m); 1264 instrumentation_end(); 1265 1266 if (severity > *worst) { 1267 *final = *m; 1268 *worst = severity; 1269 } 1270 } 1271 1272 /* mce_clear_state will clear *final, save locally for use later */ 1273 *m = *final; 1274 1275 return taint; 1276 } 1277 1278 static void kill_me_now(struct callback_head *ch) 1279 { 1280 struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me); 1281 1282 p->mce_count = 0; 1283 force_sig(SIGBUS); 1284 } 1285 1286 static void kill_me_maybe(struct callback_head *cb) 1287 { 1288 struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); 1289 int flags = MF_ACTION_REQUIRED; 1290 int ret; 1291 1292 p->mce_count = 0; 1293 pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); 1294 1295 if (!p->mce_ripv) 1296 flags |= MF_MUST_KILL; 1297 1298 ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags); 1299 if (!ret) { 1300 set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); 1301 sync_core(); 1302 return; 1303 } 1304 1305 /* 1306 * -EHWPOISON from memory_failure() means that it already sent SIGBUS 1307 * to the current process with the proper error info, so no need to 1308 * send SIGBUS here again. 1309 */ 1310 if (ret == -EHWPOISON) 1311 return; 1312 1313 pr_err("Memory error not recovered"); 1314 kill_me_now(cb); 1315 } 1316 1317 static void kill_me_never(struct callback_head *cb) 1318 { 1319 struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); 1320 1321 p->mce_count = 0; 1322 pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr); 1323 if (!memory_failure(p->mce_addr >> PAGE_SHIFT, 0)) 1324 set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); 1325 } 1326 1327 static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *)) 1328 { 1329 int count = ++current->mce_count; 1330 1331 /* First call, save all the details */ 1332 if (count == 1) { 1333 current->mce_addr = m->addr; 1334 current->mce_kflags = m->kflags; 1335 current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); 1336 current->mce_whole_page = whole_page(m); 1337 current->mce_kill_me.func = func; 1338 } 1339 1340 /* Ten is likely overkill. Don't expect more than two faults before task_work() */ 1341 if (count > 10) 1342 mce_panic("Too many consecutive machine checks while accessing user data", m, msg); 1343 1344 /* Second or later call, make sure page address matches the one from first call */ 1345 if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT)) 1346 mce_panic("Consecutive machine checks to different user pages", m, msg); 1347 1348 /* Do not call task_work_add() more than once */ 1349 if (count > 1) 1350 return; 1351 1352 task_work_add(current, ¤t->mce_kill_me, TWA_RESUME); 1353 } 1354 1355 /* Handle unconfigured int18 (should never happen) */ 1356 static noinstr void unexpected_machine_check(struct pt_regs *regs) 1357 { 1358 instrumentation_begin(); 1359 pr_err("CPU#%d: Unexpected int18 (Machine Check)\n", 1360 smp_processor_id()); 1361 instrumentation_end(); 1362 } 1363 1364 /* 1365 * The actual machine check handler. This only handles real exceptions when 1366 * something got corrupted coming in through int 18. 1367 * 1368 * This is executed in #MC context not subject to normal locking rules. 1369 * This implies that most kernel services cannot be safely used. Don't even 1370 * think about putting a printk in there! 
1371 * 1372 * On Intel systems this is entered on all CPUs in parallel through 1373 * MCE broadcast. However some CPUs might be broken beyond repair, 1374 * so be always careful when synchronizing with others. 1375 * 1376 * Tracing and kprobes are disabled: if we interrupted a kernel context 1377 * with IF=1, we need to minimize stack usage. There are also recursion 1378 * issues: if the machine check was due to a failure of the memory 1379 * backing the user stack, tracing that reads the user stack will cause 1380 * potentially infinite recursion. 1381 * 1382 * Currently, the #MC handler calls out to a number of external facilities 1383 * and, therefore, allows instrumentation around them. The optimal thing to 1384 * have would be to do the absolutely minimal work required in #MC context 1385 * and have instrumentation disabled only around that. Further processing can 1386 * then happen in process context where instrumentation is allowed. Achieving 1387 * that requires careful auditing and modifications. Until then, the code 1388 * allows instrumentation temporarily, where required. * 1389 */ 1390 noinstr void do_machine_check(struct pt_regs *regs) 1391 { 1392 int worst = 0, order, no_way_out, kill_current_task, lmce, taint = 0; 1393 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS) = { 0 }; 1394 DECLARE_BITMAP(toclear, MAX_NR_BANKS) = { 0 }; 1395 struct mca_config *cfg = &mca_cfg; 1396 struct mce m, *final; 1397 char *msg = NULL; 1398 1399 if (unlikely(mce_flags.p5)) 1400 return pentium_machine_check(regs); 1401 else if (unlikely(mce_flags.winchip)) 1402 return winchip_machine_check(regs); 1403 else if (unlikely(!mca_cfg.initialized)) 1404 return unexpected_machine_check(regs); 1405 1406 /* 1407 * Establish sequential order between the CPUs entering the machine 1408 * check handler. 1409 */ 1410 order = -1; 1411 1412 /* 1413 * If no_way_out gets set, there is no safe way to recover from this 1414 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. 1415 */ 1416 no_way_out = 0; 1417 1418 /* 1419 * If kill_current_task is not set, there might be a way to recover from this 1420 * error. 1421 */ 1422 kill_current_task = 0; 1423 1424 /* 1425 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES 1426 * on Intel. 1427 */ 1428 lmce = 1; 1429 1430 this_cpu_inc(mce_exception_count); 1431 1432 mce_gather_info(&m, regs); 1433 m.tsc = rdtsc(); 1434 1435 final = this_cpu_ptr(&mces_seen); 1436 *final = m; 1437 1438 no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); 1439 1440 barrier(); 1441 1442 /* 1443 * When no restart IP might need to kill or panic. 1444 * Assume the worst for now, but if we find the 1445 * severity is MCE_AR_SEVERITY we have other options. 1446 */ 1447 if (!(m.mcgstatus & MCG_STATUS_RIPV)) 1448 kill_current_task = (cfg->tolerant == 3) ? 0 : 1; 1449 /* 1450 * Check if this MCE is signaled to only this logical processor, 1451 * on Intel, Zhaoxin only. 1452 */ 1453 if (m.cpuvendor == X86_VENDOR_INTEL || 1454 m.cpuvendor == X86_VENDOR_ZHAOXIN) 1455 lmce = m.mcgstatus & MCG_STATUS_LMCES; 1456 1457 /* 1458 * Local machine check may already know that we have to panic. 1459 * Broadcast machine check begins rendezvous in mce_start() 1460 * Go through all banks in exclusion of the other CPUs. This way we 1461 * don't report duplicated events on shared banks because the first one 1462 * to see it will clear it. 
1463 */ 1464 if (lmce) { 1465 if (no_way_out && cfg->tolerant < 3) 1466 mce_panic("Fatal local machine check", &m, msg); 1467 } else { 1468 order = mce_start(&no_way_out); 1469 } 1470 1471 taint = __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst); 1472 1473 if (!no_way_out) 1474 mce_clear_state(toclear); 1475 1476 /* 1477 * Do most of the synchronization with other CPUs. 1478 * When there's any problem use only local no_way_out state. 1479 */ 1480 if (!lmce) { 1481 if (mce_end(order) < 0) { 1482 if (!no_way_out) 1483 no_way_out = worst >= MCE_PANIC_SEVERITY; 1484 1485 if (no_way_out && cfg->tolerant < 3) 1486 mce_panic("Fatal machine check on current CPU", &m, msg); 1487 } 1488 } else { 1489 /* 1490 * If there was a fatal machine check we should have 1491 * already called mce_panic earlier in this function. 1492 * Since we re-read the banks, we might have found 1493 * something new. Check again to see if we found a 1494 * fatal error. We call "mce_severity()" again to 1495 * make sure we have the right "msg". 1496 */ 1497 if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { 1498 mce_severity(&m, regs, cfg->tolerant, &msg, true); 1499 mce_panic("Local fatal machine check!", &m, msg); 1500 } 1501 } 1502 1503 /* 1504 * Enable instrumentation around the external facilities like task_work_add() 1505 * (via queue_task_work()), fixup_exception() etc. For now, that is. Fixing this 1506 * properly would need a lot more involved reorganization. 1507 */ 1508 instrumentation_begin(); 1509 1510 if (taint) 1511 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); 1512 1513 if (worst != MCE_AR_SEVERITY && !kill_current_task) 1514 goto out; 1515 1516 /* Fault was in user mode and we need to take some action */ 1517 if ((m.cs & 3) == 3) { 1518 /* If this triggers there is no way to recover. Die hard. */ 1519 BUG_ON(!on_thread_stack() || !user_mode(regs)); 1520 1521 if (kill_current_task) 1522 queue_task_work(&m, msg, kill_me_now); 1523 else 1524 queue_task_work(&m, msg, kill_me_maybe); 1525 1526 } else { 1527 /* 1528 * Handle an MCE which has happened in kernel space but from 1529 * which the kernel can recover: ex_has_fault_handler() has 1530 * already verified that the rIP at which the error happened is 1531 * a rIP from which the kernel can recover (by jumping to 1532 * recovery code specified in _ASM_EXTABLE_FAULT()) and the 1533 * corresponding exception handler which would do that is the 1534 * proper one. 1535 */ 1536 if (m.kflags & MCE_IN_KERNEL_RECOV) { 1537 if (!fixup_exception(regs, X86_TRAP_MC, 0, 0)) 1538 mce_panic("Failed kernel mode recovery", &m, msg); 1539 } 1540 1541 if (m.kflags & MCE_IN_KERNEL_COPYIN) 1542 queue_task_work(&m, msg, kill_me_never); 1543 } 1544 1545 out: 1546 instrumentation_end(); 1547 1548 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 1549 } 1550 EXPORT_SYMBOL_GPL(do_machine_check); 1551 1552 #ifndef CONFIG_MEMORY_FAILURE 1553 int memory_failure(unsigned long pfn, int flags) 1554 { 1555 /* mce_severity() should not hand us an ACTION_REQUIRED error */ 1556 BUG_ON(flags & MF_ACTION_REQUIRED); 1557 pr_err("Uncorrected memory error in page 0x%lx ignored\n" 1558 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", 1559 pfn); 1560 1561 return 0; 1562 } 1563 #endif 1564 1565 /* 1566 * Periodic polling timer for "silent" machine check errors. If the 1567 * poller finds an MCE, poll 2x faster. When the poller finds no more 1568 * errors, poll 2x slower (up to check_interval seconds). 
1569 */ 1570 static unsigned long check_interval = INITIAL_CHECK_INTERVAL; 1571 1572 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ 1573 static DEFINE_PER_CPU(struct timer_list, mce_timer); 1574 1575 static unsigned long mce_adjust_timer_default(unsigned long interval) 1576 { 1577 return interval; 1578 } 1579 1580 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 1581 1582 static void __start_timer(struct timer_list *t, unsigned long interval) 1583 { 1584 unsigned long when = jiffies + interval; 1585 unsigned long flags; 1586 1587 local_irq_save(flags); 1588 1589 if (!timer_pending(t) || time_before(when, t->expires)) 1590 mod_timer(t, round_jiffies(when)); 1591 1592 local_irq_restore(flags); 1593 } 1594 1595 static void mce_timer_fn(struct timer_list *t) 1596 { 1597 struct timer_list *cpu_t = this_cpu_ptr(&mce_timer); 1598 unsigned long iv; 1599 1600 WARN_ON(cpu_t != t); 1601 1602 iv = __this_cpu_read(mce_next_interval); 1603 1604 if (mce_available(this_cpu_ptr(&cpu_info))) { 1605 machine_check_poll(0, this_cpu_ptr(&mce_poll_banks)); 1606 1607 if (mce_intel_cmci_poll()) { 1608 iv = mce_adjust_timer(iv); 1609 goto done; 1610 } 1611 } 1612 1613 /* 1614 * Alert userspace if needed. If we logged an MCE, reduce the polling 1615 * interval, otherwise increase the polling interval. 1616 */ 1617 if (mce_notify_irq()) 1618 iv = max(iv / 2, (unsigned long) HZ/100); 1619 else 1620 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); 1621 1622 done: 1623 __this_cpu_write(mce_next_interval, iv); 1624 __start_timer(t, iv); 1625 } 1626 1627 /* 1628 * Ensure that the timer is firing in @interval from now. 1629 */ 1630 void mce_timer_kick(unsigned long interval) 1631 { 1632 struct timer_list *t = this_cpu_ptr(&mce_timer); 1633 unsigned long iv = __this_cpu_read(mce_next_interval); 1634 1635 __start_timer(t, interval); 1636 1637 if (interval < iv) 1638 __this_cpu_write(mce_next_interval, interval); 1639 } 1640 1641 /* Must not be called in IRQ context where del_timer_sync() can deadlock */ 1642 static void mce_timer_delete_all(void) 1643 { 1644 int cpu; 1645 1646 for_each_online_cpu(cpu) 1647 del_timer_sync(&per_cpu(mce_timer, cpu)); 1648 } 1649 1650 /* 1651 * Notify the user(s) about new machine check events. 1652 * Can be called from interrupt context, but not from machine check/NMI 1653 * context. 1654 */ 1655 int mce_notify_irq(void) 1656 { 1657 /* Not more than two messages every minute */ 1658 static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); 1659 1660 if (test_and_clear_bit(0, &mce_need_notify)) { 1661 mce_work_trigger(); 1662 1663 if (__ratelimit(&ratelimit)) 1664 pr_info(HW_ERR "Machine check events logged\n"); 1665 1666 return 1; 1667 } 1668 return 0; 1669 } 1670 EXPORT_SYMBOL_GPL(mce_notify_irq); 1671 1672 static void __mcheck_cpu_mce_banks_init(void) 1673 { 1674 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1675 u8 n_banks = this_cpu_read(mce_num_banks); 1676 int i; 1677 1678 for (i = 0; i < n_banks; i++) { 1679 struct mce_bank *b = &mce_banks[i]; 1680 1681 /* 1682 * Init them all, __mcheck_cpu_apply_quirks() is going to apply 1683 * the required vendor quirks before 1684 * __mcheck_cpu_init_clear_banks() does the final bank setup. 1685 */ 1686 b->ctl = -1ULL; 1687 b->init = true; 1688 } 1689 } 1690 1691 /* 1692 * Initialize Machine Checks for a CPU. 
1693 */ 1694 static void __mcheck_cpu_cap_init(void) 1695 { 1696 u64 cap; 1697 u8 b; 1698 1699 rdmsrl(MSR_IA32_MCG_CAP, cap); 1700 1701 b = cap & MCG_BANKCNT_MASK; 1702 1703 if (b > MAX_NR_BANKS) { 1704 pr_warn("CPU%d: Using only %u machine check banks out of %u\n", 1705 smp_processor_id(), MAX_NR_BANKS, b); 1706 b = MAX_NR_BANKS; 1707 } 1708 1709 this_cpu_write(mce_num_banks, b); 1710 1711 __mcheck_cpu_mce_banks_init(); 1712 1713 /* Use accurate RIP reporting if available. */ 1714 if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9) 1715 mca_cfg.rip_msr = MSR_IA32_MCG_EIP; 1716 1717 if (cap & MCG_SER_P) 1718 mca_cfg.ser = 1; 1719 } 1720 1721 static void __mcheck_cpu_init_generic(void) 1722 { 1723 enum mcp_flags m_fl = 0; 1724 mce_banks_t all_banks; 1725 u64 cap; 1726 1727 if (!mca_cfg.bootlog) 1728 m_fl = MCP_DONTLOG; 1729 1730 /* 1731 * Log the machine checks left over from the previous reset. Log them 1732 * only, do not start processing them. That will happen in mcheck_late_init() 1733 * when all consumers have been registered on the notifier chain. 1734 */ 1735 bitmap_fill(all_banks, MAX_NR_BANKS); 1736 machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks); 1737 1738 cr4_set_bits(X86_CR4_MCE); 1739 1740 rdmsrl(MSR_IA32_MCG_CAP, cap); 1741 if (cap & MCG_CTL_P) 1742 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 1743 } 1744 1745 static void __mcheck_cpu_init_clear_banks(void) 1746 { 1747 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1748 int i; 1749 1750 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1751 struct mce_bank *b = &mce_banks[i]; 1752 1753 if (!b->init) 1754 continue; 1755 wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl); 1756 wrmsrl(mca_msr_reg(i, MCA_STATUS), 0); 1757 } 1758 } 1759 1760 /* 1761 * Do a final check to see if there are any unused/RAZ banks. 1762 * 1763 * This must be done after the banks have been initialized and any quirks have 1764 * been applied. 1765 * 1766 * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs. 1767 * Otherwise, a user who disables a bank will not be able to re-enable it 1768 * without a system reboot. 1769 */ 1770 static void __mcheck_cpu_check_banks(void) 1771 { 1772 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1773 u64 msrval; 1774 int i; 1775 1776 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { 1777 struct mce_bank *b = &mce_banks[i]; 1778 1779 if (!b->init) 1780 continue; 1781 1782 rdmsrl(mca_msr_reg(i, MCA_CTL), msrval); 1783 b->init = !!msrval; 1784 } 1785 } 1786 1787 /* Add per CPU specific workarounds here */ 1788 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) 1789 { 1790 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1791 struct mca_config *cfg = &mca_cfg; 1792 1793 if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 1794 pr_info("unknown CPU type - not enabling MCE support\n"); 1795 return -EOPNOTSUPP; 1796 } 1797 1798 /* This should be disabled by the BIOS, but isn't always */ 1799 if (c->x86_vendor == X86_VENDOR_AMD) { 1800 if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) { 1801 /* 1802 * disable GART TBL walk error reporting, which 1803 * trips off incorrectly with the IOMMU & 3ware 1804 * & Cerberus: 1805 */ 1806 clear_bit(10, (unsigned long *)&mce_banks[4].ctl); 1807 } 1808 if (c->x86 < 0x11 && cfg->bootlog < 0) { 1809 /* 1810 * Lots of broken BIOS around that don't clear them 1811 * by default and leave crap in there. Don't log: 1812 */ 1813 cfg->bootlog = 0; 1814 } 1815 /* 1816 * Various K7s with broken bank 0 around. 
Always disable 1817 * by default. 1818 */ 1819 if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0) 1820 mce_banks[0].ctl = 0; 1821 1822 /* 1823 * overflow_recov is supported for F15h Models 00h-0fh 1824 * even though we don't have a CPUID bit for it. 1825 */ 1826 if (c->x86 == 0x15 && c->x86_model <= 0xf) 1827 mce_flags.overflow_recov = 1; 1828 1829 } 1830 1831 if (c->x86_vendor == X86_VENDOR_INTEL) { 1832 /* 1833 * SDM documents that on family 6 bank 0 should not be written 1834 * because it aliases to another special BIOS controlled 1835 * register. 1836 * But it's not aliased anymore on model 0x1a+ 1837 * Don't ignore bank 0 completely because there could be a 1838 * valid event later, merely don't write CTL0. 1839 */ 1840 1841 if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0) 1842 mce_banks[0].init = false; 1843 1844 /* 1845 * All newer Intel systems support MCE broadcasting. Enable 1846 * synchronization with a one second timeout. 1847 */ 1848 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && 1849 cfg->monarch_timeout < 0) 1850 cfg->monarch_timeout = USEC_PER_SEC; 1851 1852 /* 1853 * There are also broken BIOSes on some Pentium M and 1854 * earlier systems: 1855 */ 1856 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) 1857 cfg->bootlog = 0; 1858 1859 if (c->x86 == 6 && c->x86_model == 45) 1860 mce_flags.snb_ifu_quirk = 1; 1861 } 1862 1863 if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { 1864 /* 1865 * All newer Zhaoxin CPUs support MCE broadcasting. Enable 1866 * synchronization with a one second timeout. 1867 */ 1868 if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { 1869 if (cfg->monarch_timeout < 0) 1870 cfg->monarch_timeout = USEC_PER_SEC; 1871 } 1872 } 1873 1874 if (cfg->monarch_timeout < 0) 1875 cfg->monarch_timeout = 0; 1876 if (cfg->bootlog != 0) 1877 cfg->panic_timeout = 30; 1878 1879 return 0; 1880 } 1881 1882 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) 1883 { 1884 if (c->x86 != 5) 1885 return 0; 1886 1887 switch (c->x86_vendor) { 1888 case X86_VENDOR_INTEL: 1889 intel_p5_mcheck_init(c); 1890 mce_flags.p5 = 1; 1891 return 1; 1892 case X86_VENDOR_CENTAUR: 1893 winchip_mcheck_init(c); 1894 mce_flags.winchip = 1; 1895 return 1; 1896 default: 1897 return 0; 1898 } 1899 1900 return 0; 1901 } 1902 1903 /* 1904 * Init basic CPU features needed for early decoding of MCEs. 1905 */ 1906 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) 1907 { 1908 if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { 1909 mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); 1910 mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); 1911 mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); 1912 mce_flags.amd_threshold = 1; 1913 } 1914 } 1915 1916 static void mce_centaur_feature_init(struct cpuinfo_x86 *c) 1917 { 1918 struct mca_config *cfg = &mca_cfg; 1919 1920 /* 1921 * All newer Centaur CPUs support MCE broadcasting. Enable 1922 * synchronization with a one second timeout. 1923 */ 1924 if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || 1925 c->x86 > 6) { 1926 if (cfg->monarch_timeout < 0) 1927 cfg->monarch_timeout = USEC_PER_SEC; 1928 } 1929 } 1930 1931 static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) 1932 { 1933 struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); 1934 1935 /* 1936 * These CPUs have MCA bank 8 which reports only one error type called 1937 * SVAD (System View Address Decoder). 
The reporting of that error is 1938 * controlled by IA32_MC8.CTL.0. 1939 * 1940 * If enabled, prefetching on these CPUs will cause SVAD MCE when 1941 * virtual machines start and result in a system panic. Always disable 1942 * bank 8 SVAD error by default. 1943 */ 1944 if ((c->x86 == 7 && c->x86_model == 0x1b) || 1945 (c->x86_model == 0x19 || c->x86_model == 0x1f)) { 1946 if (this_cpu_read(mce_num_banks) > 8) 1947 mce_banks[8].ctl = 0; 1948 } 1949 1950 intel_init_cmci(); 1951 intel_init_lmce(); 1952 mce_adjust_timer = cmci_intel_adjust_timer; 1953 } 1954 1955 static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c) 1956 { 1957 intel_clear_lmce(); 1958 } 1959 1960 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) 1961 { 1962 switch (c->x86_vendor) { 1963 case X86_VENDOR_INTEL: 1964 mce_intel_feature_init(c); 1965 mce_adjust_timer = cmci_intel_adjust_timer; 1966 break; 1967 1968 case X86_VENDOR_AMD: { 1969 mce_amd_feature_init(c); 1970 break; 1971 } 1972 1973 case X86_VENDOR_HYGON: 1974 mce_hygon_feature_init(c); 1975 break; 1976 1977 case X86_VENDOR_CENTAUR: 1978 mce_centaur_feature_init(c); 1979 break; 1980 1981 case X86_VENDOR_ZHAOXIN: 1982 mce_zhaoxin_feature_init(c); 1983 break; 1984 1985 default: 1986 break; 1987 } 1988 } 1989 1990 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) 1991 { 1992 switch (c->x86_vendor) { 1993 case X86_VENDOR_INTEL: 1994 mce_intel_feature_clear(c); 1995 break; 1996 1997 case X86_VENDOR_ZHAOXIN: 1998 mce_zhaoxin_feature_clear(c); 1999 break; 2000 2001 default: 2002 break; 2003 } 2004 } 2005 2006 static void mce_start_timer(struct timer_list *t) 2007 { 2008 unsigned long iv = check_interval * HZ; 2009 2010 if (mca_cfg.ignore_ce || !iv) 2011 return; 2012 2013 this_cpu_write(mce_next_interval, iv); 2014 __start_timer(t, iv); 2015 } 2016 2017 static void __mcheck_cpu_setup_timer(void) 2018 { 2019 struct timer_list *t = this_cpu_ptr(&mce_timer); 2020 2021 timer_setup(t, mce_timer_fn, TIMER_PINNED); 2022 } 2023 2024 static void __mcheck_cpu_init_timer(void) 2025 { 2026 struct timer_list *t = this_cpu_ptr(&mce_timer); 2027 2028 timer_setup(t, mce_timer_fn, TIMER_PINNED); 2029 mce_start_timer(t); 2030 } 2031 2032 bool filter_mce(struct mce *m) 2033 { 2034 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 2035 return amd_filter_mce(m); 2036 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 2037 return intel_filter_mce(m); 2038 2039 return false; 2040 } 2041 2042 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) 2043 { 2044 irqentry_state_t irq_state; 2045 2046 WARN_ON_ONCE(user_mode(regs)); 2047 2048 /* 2049 * Only required when from kernel mode. See 2050 * mce_check_crashing_cpu() for details. 2051 */ 2052 if (mca_cfg.initialized && mce_check_crashing_cpu()) 2053 return; 2054 2055 irq_state = irqentry_nmi_enter(regs); 2056 2057 do_machine_check(regs); 2058 2059 irqentry_nmi_exit(regs, irq_state); 2060 } 2061 2062 static __always_inline void exc_machine_check_user(struct pt_regs *regs) 2063 { 2064 irqentry_enter_from_user_mode(regs); 2065 2066 do_machine_check(regs); 2067 2068 irqentry_exit_to_user_mode(regs); 2069 } 2070 2071 #ifdef CONFIG_X86_64 2072 /* MCE hit kernel mode */ 2073 DEFINE_IDTENTRY_MCE(exc_machine_check) 2074 { 2075 unsigned long dr7; 2076 2077 dr7 = local_db_save(); 2078 exc_machine_check_kernel(regs); 2079 local_db_restore(dr7); 2080 } 2081 2082 /* The user mode variant. 
bool filter_mce(struct mce *m)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return amd_filter_mce(m);
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return intel_filter_mce(m);

	return false;
}

static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	WARN_ON_ONCE(user_mode(regs));

	/*
	 * Only required when from kernel mode. See
	 * mce_check_crashing_cpu() for details.
	 */
	if (mca_cfg.initialized && mce_check_crashing_cpu())
		return;

	irq_state = irqentry_nmi_enter(regs);

	do_machine_check(regs);

	irqentry_nmi_exit(regs, irq_state);
}

static __always_inline void exc_machine_check_user(struct pt_regs *regs)
{
	irqentry_enter_from_user_mode(regs);

	do_machine_check(regs);

	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* MCE hit kernel mode */
DEFINE_IDTENTRY_MCE(exc_machine_check)
{
	unsigned long dr7;

	dr7 = local_db_save();
	exc_machine_check_kernel(regs);
	local_db_restore(dr7);
}

/* The user mode variant. */
DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
{
	unsigned long dr7;

	dr7 = local_db_save();
	exc_machine_check_user(regs);
	local_db_restore(dr7);
}
#else
/* 32bit unified entry point */
DEFINE_IDTENTRY_RAW(exc_machine_check)
{
	unsigned long dr7;

	dr7 = local_db_save();
	if (user_mode(regs))
		exc_machine_check_user(regs);
	else
		exc_machine_check_kernel(regs);
	local_db_restore(dr7);
}
#endif

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	__mcheck_cpu_cap_init();

	if (__mcheck_cpu_apply_quirks(c) < 0) {
		mca_cfg.disabled = 1;
		return;
	}

	if (mce_gen_pool_init()) {
		mca_cfg.disabled = 1;
		pr_emerg("Couldn't allocate MCE records pool!\n");
		return;
	}

	mca_cfg.initialized = 1;

	__mcheck_cpu_init_early(c);
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_check_banks();
	__mcheck_cpu_setup_timer();
}

/*
 * Called for each booted CPU to clear some machine checks opt-ins
 */
void mcheck_cpu_clear(struct cpuinfo_x86 *c)
{
	if (mca_cfg.disabled)
		return;

	if (!mce_available(c))
		return;

	/*
	 * Possibly to clear general settings generic to x86
	 * __mcheck_cpu_clear_generic(c);
	 */
	__mcheck_cpu_clear_vendor(c);
}

static void __mce_disable_bank(void *arg)
{
	int bank = *((int *)arg);

	__clear_bit(bank, this_cpu_ptr(mce_poll_banks));
	cmci_disable_bank(bank);
}

void mce_disable_bank(int bank)
{
	if (bank >= this_cpu_read(mce_num_banks)) {
		pr_warn(FW_BUG
			"Ignoring request to disable invalid MCA bank %d.\n",
			bank);
		return;
	}
	set_bit(bank, mce_banks_ce_disabled);
	on_each_cpu(__mce_disable_bank, &bank, 1);
}
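
/*
 * Example (illustrative): platform code that knows firmware handles a bank's
 * corrected errors, e.g. APEI code honouring a firmware-first HEST entry,
 * would typically call
 *
 *	mce_disable_bank(bank);
 *
 * once per such bank; afterwards that bank is neither polled nor
 * CMCI-enabled on any CPU.
 */
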
/*
 * mce=off		Disables machine check
 * mce=no_cmci		Disables CMCI
 * mce=no_lmce		Disables LMCE
 * mce=dont_log_ce	Clears corrected events silently, no log created for CEs.
 * mce=print_all	Print all machine check logs to console
 * mce=ignore_ce	Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD Fam10h
 *			and older.
 * mce=nobootlog	Don't log MCEs from before booting.
 * mce=bios_cmci_threshold	Don't program the CMCI threshold
 * mce=recovery		force enable copy_mc_fragile()
 */
static int __init mcheck_enable(char *str)
{
	struct mca_config *cfg = &mca_cfg;

	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		cfg->disabled = 1;
	else if (!strcmp(str, "no_cmci"))
		cfg->cmci_disabled = true;
	else if (!strcmp(str, "no_lmce"))
		cfg->lmce_disabled = 1;
	else if (!strcmp(str, "dont_log_ce"))
		cfg->dont_log_ce = true;
	else if (!strcmp(str, "print_all"))
		cfg->print_all = true;
	else if (!strcmp(str, "ignore_ce"))
		cfg->ignore_ce = true;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		cfg->bootlog = (str[0] == 'b');
	else if (!strcmp(str, "bios_cmci_threshold"))
		cfg->bios_cmci_threshold = 1;
	else if (!strcmp(str, "recovery"))
		cfg->recovery = 1;
	else if (isdigit(str[0])) {
		if (get_option(&str, &cfg->tolerant) == 2)
			get_option(&str, &(cfg->monarch_timeout));
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);
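
/*
 * Example (illustrative): booting with "mce=2,500" sets tolerant=2 and a
 * 500 microsecond monarch timeout, while "mce=nobootlog" only suppresses
 * logging of errors left over from before boot. Unrecognized strings are
 * ignored with a hint to use the sysfs interface instead.
 */
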
int __init mcheck_init(void)
{
	mce_register_decode_chain(&early_nb);
	mce_register_decode_chain(&mce_uc_nb);
	mce_register_decode_chain(&mce_default_nb);

	INIT_WORK(&mce_work, mce_gen_pool_process);
	init_irq_work(&mce_irq_work, mce_irq_work_cb);

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static void mce_disable_error_reporting(void)
{
	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
	int i;

	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
	}
}

static void vendor_disable_error_reporting(void)
{
	/*
	 * Don't clear on Intel or AMD or Hygon or Zhaoxin CPUs. Some of these
	 * MSRs are socket-wide. Disabling them for just a single offlined CPU
	 * is bad, since it will inhibit reporting for all shared resources on
	 * the socket like the last level cache (LLC), the integrated memory
	 * controller (iMC), etc.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
		return;

	mce_disable_error_reporting();
}

static int mce_syscore_suspend(void)
{
	vendor_disable_error_reporting();
	return 0;
}

static void mce_syscore_shutdown(void)
{
	vendor_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
	__mcheck_cpu_init_clear_banks();
}

static struct syscore_ops mce_syscore_ops = {
	.suspend = mce_syscore_suspend,
	.shutdown = mce_syscore_shutdown,
	.resume = mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_clear_banks();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct bus_type mce_subsys = {
	.name = "machinecheck",
	.dev_name = "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank_dev, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	u8 bank = attr_to_bank(attr)->bank;
	struct mce_bank *b;

	if (bank >= per_cpu(mce_num_banks, s->id))
		return -EINVAL;

	b = &per_cpu(mce_banks_array, s->id)[bank];

	if (!b->init)
		return -ENODEV;

	return sprintf(buf, "%llx\n", b->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u8 bank = attr_to_bank(attr)->bank;
	struct mce_bank *b;
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	if (bank >= per_cpu(mce_num_banks, s->id))
		return -EINVAL;

	b = &per_cpu(mce_banks_array, s->id)[bank];

	if (!b->init)
		return -ENODEV;

	b->ctl = new;
	mce_restart();

	return size;
}
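
/*
 * Example (illustrative): with sysfs mounted in the usual place, bank 0 of
 * CPU 0 can be inspected and restricted from user space:
 *
 *	# cat /sys/devices/system/machinecheck/machinecheck0/bank0
 *	ffffffffffffffff
 *	# echo 0x0 > /sys/devices/system/machinecheck/machinecheck0/bank0
 *
 * The write only changes CPU 0's copy of the control word, but the
 * subsequent mce_restart() reprograms MCi_CTL on every CPU from its own
 * mce_banks_array values.
 */
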
static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	mutex_lock(&mce_sysfs_mutex);
	if (mca_cfg.ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.ignore_ce = true;
		} else {
			/* enable ce features */
			mca_cfg.ignore_ce = false;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (kstrtou64(buf, 0, &new) < 0)
		return -EINVAL;

	mutex_lock(&mce_sysfs_mutex);
	if (mca_cfg.cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mca_cfg.cmci_disabled = true;
		} else {
			/* enable cmci */
			mca_cfg.cmci_disabled = false;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	mutex_unlock(&mce_sysfs_mutex);

	return size;
}
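
/*
 * Note (illustrative summary): the two toggles above differ in scope.
 * Setting ignore_ce stops the polling timer and tears down CMCI, so
 * corrected events are no longer consumed at all; setting cmci_disabled
 * only tears down CMCI and leaves the polling timer alone.
 */
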
static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long old_check_interval = check_interval;
	ssize_t ret = device_store_ulong(s, attr, buf, size);

	if (check_interval == old_check_interval)
		return ret;

	mutex_lock(&mce_sysfs_mutex);
	mce_restart();
	mutex_unlock(&mce_sysfs_mutex);

	return ret;
}

static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
	&mca_cfg.ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
	&mca_cfg.cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
#ifdef CONFIG_X86_MCELOG_LEGACY
	&dev_attr_trigger,
#endif
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_print_all.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
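
/*
 * Sketch (hypothetical, not an existing knob): a further tunable backed by a
 * bool in mca_cfg would follow the DEVICE_BOOL_ATTR pattern used above, e.g.
 *
 *	static DEVICE_BOOL_ATTR(example_flag, 0644, mca_cfg.example_flag);
 *
 * with &dev_attr_example_flag.attr added to mce_device_attrs[] so that
 * mce_device_create() instantiates it for every CPU device.
 */
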
static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per CPU device init. All of the CPUs still share the same bank device: */
static int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = per_cpu(mce_device, cpu);
	if (dev)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
		err = device_create_file(dev, &mce_bank_devs[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_bank_devs[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
		device_remove_file(dev, &mce_bank_devs[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void mce_disable_cpu(void)
{
	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_clear();

	vendor_disable_error_reporting();
}

static void mce_reenable_cpu(void)
{
	struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
	int i;

	if (!mce_available(raw_cpu_ptr(&cpu_info)))
		return;

	if (!cpuhp_tasks_frozen)
		cmci_reenable();
	for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
	}
}

static int mce_cpu_dead(unsigned int cpu)
{
	mce_intel_hcpu_update(cpu);

	/* intentionally ignoring frozen here */
	if (!cpuhp_tasks_frozen)
		cmci_rediscover();
	return 0;
}

static int mce_cpu_online(unsigned int cpu)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);
	int ret;

	mce_device_create(cpu);

	ret = mce_threshold_create_device(cpu);
	if (ret) {
		mce_device_remove(cpu);
		return ret;
	}
	mce_reenable_cpu();
	mce_start_timer(t);
	return 0;
}

static int mce_cpu_pre_down(unsigned int cpu)
{
	struct timer_list *t = this_cpu_ptr(&mce_timer);

	mce_disable_cpu();
	del_timer_sync(t);
	mce_threshold_remove_device(cpu);
	mce_device_remove(cpu);
	return 0;
}

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < MAX_NR_BANKS; i++) {
		struct mce_bank_dev *b = &mce_bank_devs[i];
		struct device_attribute *a = &b->attr;

		b->bank = i;

		sysfs_attr_init(&a->attr);
		a->attr.name = b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode = 0644;
		a->show = show_bank;
		a->store = set_bank;
	}
}
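
/*
 * Example (illustrative): on a CPU reporting mce_num_banks == 4, the loops
 * in mce_device_create() end up exposing
 *
 *	/sys/devices/system/machinecheck/machinecheck0/bank0
 *	...
 *	/sys/devices/system/machinecheck/machinecheck0/bank3
 *
 * and likewise for every other online CPU. The attribute objects come from
 * the single global mce_bank_devs[] array; only the ctl values read and
 * written through them live in the per-CPU mce_banks_array.
 */
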
2721 */ 2722 err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online", 2723 mce_cpu_online, mce_cpu_pre_down); 2724 if (err < 0) 2725 goto err_out_online; 2726 2727 register_syscore_ops(&mce_syscore_ops); 2728 2729 return 0; 2730 2731 err_out_online: 2732 cpuhp_remove_state(CPUHP_X86_MCE_DEAD); 2733 2734 err_out_mem: 2735 free_cpumask_var(mce_device_initialized); 2736 2737 err_out: 2738 pr_err("Unable to init MCE device (rc: %d)\n", err); 2739 2740 return err; 2741 } 2742 device_initcall_sync(mcheck_init_device); 2743 2744 /* 2745 * Old style boot options parsing. Only for compatibility. 2746 */ 2747 static int __init mcheck_disable(char *str) 2748 { 2749 mca_cfg.disabled = 1; 2750 return 1; 2751 } 2752 __setup("nomce", mcheck_disable); 2753 2754 #ifdef CONFIG_DEBUG_FS 2755 struct dentry *mce_get_debugfs_dir(void) 2756 { 2757 static struct dentry *dmce; 2758 2759 if (!dmce) 2760 dmce = debugfs_create_dir("mce", NULL); 2761 2762 return dmce; 2763 } 2764 2765 static void mce_reset(void) 2766 { 2767 atomic_set(&mce_fake_panicked, 0); 2768 atomic_set(&mce_executing, 0); 2769 atomic_set(&mce_callin, 0); 2770 atomic_set(&global_nwo, 0); 2771 cpumask_setall(&mce_missing_cpus); 2772 } 2773 2774 static int fake_panic_get(void *data, u64 *val) 2775 { 2776 *val = fake_panic; 2777 return 0; 2778 } 2779 2780 static int fake_panic_set(void *data, u64 val) 2781 { 2782 mce_reset(); 2783 fake_panic = val; 2784 return 0; 2785 } 2786 2787 DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set, 2788 "%llu\n"); 2789 2790 static void __init mcheck_debugfs_init(void) 2791 { 2792 struct dentry *dmce; 2793 2794 dmce = mce_get_debugfs_dir(); 2795 debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL, 2796 &fake_panic_fops); 2797 } 2798 #else 2799 static void __init mcheck_debugfs_init(void) { } 2800 #endif 2801 2802 static int __init mcheck_late_init(void) 2803 { 2804 if (mca_cfg.recovery) 2805 enable_copy_mc_fragile(); 2806 2807 mcheck_debugfs_init(); 2808 2809 /* 2810 * Flush out everything that has been logged during early boot, now that 2811 * everything has been initialized (workqueues, decoders, ...). 2812 */ 2813 mce_schedule_work(); 2814 2815 return 0; 2816 } 2817 late_initcall(mcheck_late_init); 2818