// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/sched/clock.h>

#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	raw_spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},
};

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis __read_mostly;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed by multiple
 * CPUs simultaneously; may only be taken from NMI context.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			   arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);

static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
	int remainder_ns, decimal_msecs;

	if (duration < nmi_longest_ns || duration < action->max_duration)
		return;

	action->max_duration = duration;

	/* Report the new maximum, split into whole and fractional msecs. */
	remainder_ns = do_div(duration, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		action->handler, duration, decimal_msecs);
}
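/*
 * Illustrative sketch (hypothetical helper, not used by this file):
 * do_div() divides its 64-bit first argument in place and returns the
 * 32-bit remainder, which is what lets nmi_check_duration() above split
 * a nanosecond duration into "whole.decimal msecs" without a 64-bit
 * modulo on 32-bit kernels.
 */
static void __maybe_unused example_split_duration(u64 duration_ns)
{
	u64 whole_msecs = duration_ns;
	int remainder_ns, decimal_msecs;

	remainder_ns = do_div(whole_msecs, 1000 * 1000);	/* -> whole msecs */
	decimal_msecs = remainder_ns / 1000;			/* -> .XXX msecs */

	pr_info("example: %llu.%03d msecs\n", whole_msecs, decimal_msecs);
}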
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		nmi_check_duration(a, delta);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Warn if there are multiple registrations on the internal NMI
	 * handler call chains (SERR and IO_CHECK), which expect only one.
	 */
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses some handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * The name passed in to describe the nmi handler
		 * is used as the lookup key.
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
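/*
 * Usage sketch (hypothetical client code; all example_* names are
 * illustrative): handlers are normally registered through the
 * register_nmi_handler() wrapper in <asm/nmi.h>, which supplies the
 * struct nmiaction.  A handler should return NMI_HANDLED only for
 * events it actually owns, so unrelated events keep walking the chain.
 */
static int __maybe_unused example_nmi_handler(unsigned int type,
					      struct pt_regs *regs)
{
	bool mine = false;	/* replace with a real hardware check */

	if (!mine)
		return NMI_DONE;	/* not ours, try the next handler */

	/* ... service the event here ... */
	return NMI_HANDLED;
}

static int __maybe_unused example_register(void)
{
	return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0,
				    "example");
}

static void __maybe_unused example_unregister(void)
{
	/* May block in synchronize_rcu(); never call from NMI context. */
	unregister_nmi_handler(NMI_LOCAL, "example");
}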
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	if (panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs))
		return;

	pr_emerg("NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi) {
		nmi_panic(regs, "NMI IOCK error: Not continuing");

		/*
		 * If we end up here, it means we have received an NMI while
		 * processing panic().  Simply return without delaying and
		 * re-enabling NMIs.
		 */
		return;
	}

	/* Re-enable the IOCK line and wait for about two seconds. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;	/* 20000 iterations * 100us = 2 seconds */
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Back-to-back NMIs are detected one level up, so having
	 * multiple 'unknown' handlers is of limited use: only the first
	 * one is ever run, unless it can actually determine whether it
	 * caused the NMI.
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		nmi_panic(regs, "NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);
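/*
 * Illustrative decode (sketch only; the bit masks come from
 * <asm/mach_traps.h>) of the legacy reason port consumed by the
 * handlers above: two status bits select SERR vs. IOCHK, and a value
 * with neither bit set ends up in unknown_nmi_error().
 */
static void __maybe_unused example_decode_reason(unsigned char reason)
{
	if (reason & NMI_REASON_SERR)		/* PCI SERR# asserted */
		pr_info("example: SERR pending\n");
	if (reason & NMI_REASON_IOCHK)		/* ISA IOCHK# asserted */
		pr_info("example: IOCHK pending\n");
	if (!(reason & NMI_REASON_MASK))	/* no motherboard source */
		pr_info("example: would be treated as unknown\n");
}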
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * also have been latched as the next NMI.  Because the
		 * event is already handled, the next NMI will result in
		 * an unknown NMI.  Instead, flag this for a potential
		 * NMI to swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/*
	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
	 *
	 * Another CPU may be processing panic routines while holding
	 * nmi_reason_lock.  Check if the CPU issued the IPI for crash dumping,
	 * and if so, call its callback directly.  If there is no CPU preparing
	 * crash dump, we simply loop here.
	 */
	while (!raw_spin_trylock(&nmi_reason_lock)) {
		run_crash_ipi_callback(regs);
		cpu_relax();
	}

	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * also.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);
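/*
 * Worked example of the swallow heuristic above (hypothetical perf
 * scenario; local variables stand in for the per-CPU state):
 */
static void __maybe_unused example_swallow_trace(void)
{
	bool swallow = false, b2b;
	int handled;

	/* NMI #1: perf consumes two events; the second was also latched. */
	handled = 2;
	if (handled > 1)
		swallow = true;

	/* NMI #2: same regs->ip, and nothing claims the consumed event. */
	b2b = true;
	handled = 0;

	if (!handled && b2b && swallow)
		pr_info("example: swallowed instead of 'unknown NMI'\n");
}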
/*
 * NMIs can page fault or hit breakpoints, and the CPU loses its NMI
 * context (NMIs become unmasked) when the breakpoint or page fault
 * handler does an IRET.
 *
 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
 * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
 * if the outer NMI came from kernel mode, but we can still nest if the
 * outer NMI came from user mode.
 *
 * To handle these nested NMIs, we have three states:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it, so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored.  Only one NMI is restarted.)
 *
 * If an NMI executes an iret, another NMI can preempt it.  We do not
 * want to allow this new NMI to run, but we want to execute it when the
 * first one finishes.  We set the state to "latched", and the exit of
 * the first NMI will perform a dec_return; if the result is zero
 * (NOT_RUNNING), then it will simply exit the NMI handler.  If not, the
 * dec_return would have set the state to NMI_EXECUTING (what we want it
 * to be when we are running).  In this case, we simply jump back to
 * rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING.  The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read.  As nested NMIs must be restarted
 * and they can not take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

#ifdef CONFIG_X86_64
/*
 * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
 * some care, the inner breakpoint will clobber the outer breakpoint's
 * stack.
 *
 * If a breakpoint is being processed, and the debug stack is being
 * used, if an NMI comes in and also hits a breakpoint, the stack
 * pointer will be set to the same fixed address as the breakpoint that
 * was interrupted, causing that stack to be corrupted.  To handle this
 * case, check if the stack that was interrupted is the debug stack, and
 * if so, change the IDT so that new breakpoints will use the current
 * stack and not switch to the fixed address.  On return of the NMI,
 * switch back to the original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);

static bool notrace is_debug_stack(unsigned long addr)
{
	struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
	unsigned long top = CEA_ESTACK_TOP(cs, DB);
	unsigned long bot = CEA_ESTACK_BOT(cs, DB1);

	if (__this_cpu_read(debug_stack_usage))
		return true;
	/*
	 * Note, this covers the guard page between DB and DB1 as well to
	 * avoid two checks.  A valid @addr can never point into the
	 * guard page anyway.
	 */
	return addr >= bot && addr < top;
}
NOKPROBE_SYMBOL(is_debug_stack);
#endif
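/*
 * Transition sketch for the state machine described above, as
 * implemented in do_nmi() below (illustrative helper only):
 *
 *	NOT_RUNNING -> EXECUTING		on entry, handlers run
 *	EXECUTING   -> LATCHED			nested NMI, returns at once
 *	dec_return: LATCHED   -> EXECUTING	goto nmi_restart
 *	            EXECUTING -> NOT_RUNNING	done
 */
static bool __maybe_unused example_nmi_exit_restarts(enum nmi_states state)
{
	/* Mirrors this_cpu_dec_return(nmi_state) at the end of do_nmi(). */
	return (state - 1) != NMI_NOT_RUNNING;
}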
dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
		return;

	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
		this_cpu_write(nmi_state, NMI_LATCHED);
		return;
	}
	this_cpu_write(nmi_state, NMI_EXECUTING);
	this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:

#ifdef CONFIG_X86_64
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too.  We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
#endif

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

#ifdef CONFIG_X86_64
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
#endif

	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
		write_cr2(this_cpu_read(nmi_cr2));
	if (this_cpu_dec_return(nmi_state))
		goto nmi_restart;

	if (user_mode(regs))
		mds_user_clear_cpu_buffers();
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);
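/*
 * Usage sketch (hypothetical caller): idle loops halt at a stable RIP,
 * which would otherwise look like a back-to-back NMI to the detection
 * logic above, so such code clears the state before halting.  Roughly:
 */
static void __maybe_unused example_idle_with_nmi_touch(void)
{
	local_touch_nmi();	/* next NMI will not appear back-to-back */
	safe_halt();		/* STI;HLT until the next interrupt */
}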