xref: /openbmc/linux/arch/x86/kernel/nmi.c (revision b34e08d5)
1 /*
2  *  Copyright (C) 1991, 1992  Linus Torvalds
3  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
4  *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
5  *
6  *  Pentium III FXSR, SSE support
7  *	Gareth Hughes <gareth@valinux.com>, May 2000
8  */
9 
10 /*
11  * Handle hardware traps and faults.
12  */
13 #include <linux/spinlock.h>
14 #include <linux/kprobes.h>
15 #include <linux/kdebug.h>
16 #include <linux/nmi.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/hardirq.h>
20 #include <linux/slab.h>
21 #include <linux/export.h>
22 
23 #if defined(CONFIG_EDAC)
24 #include <linux/edac.h>
25 #endif
26 
27 #include <linux/atomic.h>
28 #include <asm/traps.h>
29 #include <asm/mach_traps.h>
30 #include <asm/nmi.h>
31 #include <asm/x86_init.h>
32 
33 #define CREATE_TRACE_POINTS
34 #include <trace/events/nmi.h>
35 
36 struct nmi_desc {
37 	spinlock_t lock;
38 	struct list_head head;
39 };
40 
41 static struct nmi_desc nmi_desc[NMI_MAX] =
42 {
43 	{
44 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
45 		.head = LIST_HEAD_INIT(nmi_desc[0].head),
46 	},
47 	{
48 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
49 		.head = LIST_HEAD_INIT(nmi_desc[1].head),
50 	},
51 	{
52 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
53 		.head = LIST_HEAD_INIT(nmi_desc[2].head),
54 	},
55 	{
56 		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
57 		.head = LIST_HEAD_INIT(nmi_desc[3].head),
58 	},
59 
60 };
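
/*
 * Note: the four descriptors above correspond, in order, to the NMI types
 * from <asm/nmi.h> (NMI_LOCAL, NMI_UNKNOWN, NMI_SERR and NMI_IO_CHECK, with
 * NMI_MAX as the array bound).  Each descriptor holds the list of handlers
 * registered for that type.
 */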
61 
62 struct nmi_stats {
63 	unsigned int normal;
64 	unsigned int unknown;
65 	unsigned int external;
66 	unsigned int swallow;
67 };
68 
69 static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
70 
71 static int ignore_nmis;
72 
73 int unknown_nmi_panic;
74 /*
75  * Prevent the NMI reason port (0x61) from being accessed simultaneously.
76  * This lock may only be taken from NMI context.
77  */
78 static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
79 
80 static int __init setup_unknown_nmi_panic(char *str)
81 {
82 	unknown_nmi_panic = 1;
83 	return 1;
84 }
85 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
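
/*
 * Illustrative note (an assumption, not taken from this file): in addition
 * to the "unknown_nmi_panic" boot parameter handled above, the same flag is
 * usually reachable at run time through a sysctl wired up elsewhere
 * (kernel/sysctl.c), e.g.:
 *
 *	sysctl kernel.unknown_nmi_panic=1
 */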
86 
87 #define nmi_to_desc(type) (&nmi_desc[type])
88 
89 static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
90 
91 static int __init nmi_warning_debugfs(void)
92 {
93 	debugfs_create_u64("nmi_longest_ns", 0644,
94 			arch_debugfs_dir, &nmi_longest_ns);
95 	return 0;
96 }
97 fs_initcall(nmi_warning_debugfs);
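
/*
 * Usage sketch: nmi_longest_ns is the threshold (in nanoseconds, default
 * 1 ms) above which nmi_max_handler() below reports a slow NMI handler.
 * Assuming debugfs is mounted at the usual /sys/kernel/debug and that
 * arch_debugfs_dir is the "x86" directory there, the threshold can be
 * raised to e.g. 2 ms at run time:
 *
 *	echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 */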
98 
99 static void nmi_max_handler(struct irq_work *w)
100 {
101 	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
102 	int remainder_ns, decimal_msecs;
103 	u64 whole_msecs = ACCESS_ONCE(a->max_duration);
104 
105 	remainder_ns = do_div(whole_msecs, (1000 * 1000));
106 	decimal_msecs = remainder_ns / 1000;
107 
108 	printk_ratelimited(KERN_INFO
109 		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
110 		a->handler, whole_msecs, decimal_msecs);
111 }
112 
113 static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
114 {
115 	struct nmi_desc *desc = nmi_to_desc(type);
116 	struct nmiaction *a;
117 	int handled = 0;
118 
119 	rcu_read_lock();
120 
121 	/*
122 	 * NMIs are edge-triggered, which means if you have enough
123 	 * of them concurrently, you can lose some because only one
124 	 * can be latched at any given time.  Walk the whole list
125 	 * to handle those situations.
126 	 */
127 	list_for_each_entry_rcu(a, &desc->head, list) {
128 		int thishandled;
129 		u64 delta;
130 
131 		delta = sched_clock();
132 		thishandled = a->handler(type, regs);
133 		handled += thishandled;
134 		delta = sched_clock() - delta;
135 		trace_nmi_handler(a->handler, (int)delta, thishandled);
136 
137 		if (delta < nmi_longest_ns || delta < a->max_duration)
138 			continue;
139 
140 		a->max_duration = delta;
141 		irq_work_queue(&a->irq_work);
142 	}
143 
144 	rcu_read_unlock();
145 
146 	/* return total number of NMI events handled */
147 	return handled;
148 }
149 
150 int __register_nmi_handler(unsigned int type, struct nmiaction *action)
151 {
152 	struct nmi_desc *desc = nmi_to_desc(type);
153 	unsigned long flags;
154 
155 	if (!action->handler)
156 		return -EINVAL;
157 
158 	init_irq_work(&action->irq_work, nmi_max_handler);
159 
160 	spin_lock_irqsave(&desc->lock, flags);
161 
162 	/*
163 	 * Most handlers of type NMI_UNKNOWN never return because
164 	 * they just assume the NMI is theirs.  This is just a sanity
165 	 * check to manage expectations.
166 	 */
167 	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
168 	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
169 	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
170 
171 	/*
172 	 * Some handlers need to be executed first, otherwise a fake
173 	 * event confuses some handlers (kdump uses this flag).
174 	 */
175 	if (action->flags & NMI_FLAG_FIRST)
176 		list_add_rcu(&action->list, &desc->head);
177 	else
178 		list_add_tail_rcu(&action->list, &desc->head);
179 
180 	spin_unlock_irqrestore(&desc->lock, flags);
181 	return 0;
182 }
183 EXPORT_SYMBOL(__register_nmi_handler);
184 
185 void unregister_nmi_handler(unsigned int type, const char *name)
186 {
187 	struct nmi_desc *desc = nmi_to_desc(type);
188 	struct nmiaction *n;
189 	unsigned long flags;
190 
191 	spin_lock_irqsave(&desc->lock, flags);
192 
193 	list_for_each_entry_rcu(n, &desc->head, list) {
194 		/*
195 		 * the name passed in to describe the nmi handler
196 		 * is used as the lookup key
197 		 */
198 		if (!strcmp(n->name, name)) {
199 			WARN(in_nmi(),
200 				"Trying to free NMI (%s) from NMI context!\n", n->name);
201 			list_del_rcu(&n->list);
202 			break;
203 		}
204 	}
205 
206 	spin_unlock_irqrestore(&desc->lock, flags);
207 	synchronize_rcu();
208 }
209 EXPORT_SYMBOL_GPL(unregister_nmi_handler);
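
/*
 * Minimal usage sketch (illustrative only, not part of this file): a handler
 * registered through the register_nmi_handler() wrapper from <asm/nmi.h>,
 * which builds the struct nmiaction and calls __register_nmi_handler()
 * above.  The functions and the "example" name are hypothetical.
 */
#if 0
static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	/* Return non-zero if this NMI was ours, 0 to let other handlers try. */
	return 0;
}

static int __init example_nmi_init(void)
{
	/* Run after any handlers already on the NMI_LOCAL list (no flags). */
	return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0,
				    "example");
}

static void __exit example_nmi_exit(void)
{
	/* The name used at registration time is the lookup key. */
	unregister_nmi_handler(NMI_LOCAL, "example");
}
#endif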
210 
211 static __kprobes void
212 pci_serr_error(unsigned char reason, struct pt_regs *regs)
213 {
214 	/* check to see if anyone registered against these types of errors */
215 	if (nmi_handle(NMI_SERR, regs, false))
216 		return;
217 
218 	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
219 		 reason, smp_processor_id());
220 
221 	/*
222 	 * On some machines, PCI SERR line is used to report memory
223 	 * errors. EDAC makes use of it.
224 	 */
225 #if defined(CONFIG_EDAC)
226 	if (edac_handler_set()) {
227 		edac_atomic_assert_error();
228 		return;
229 	}
230 #endif
231 
232 	if (panic_on_unrecovered_nmi)
233 		panic("NMI: Not continuing");
234 
235 	pr_emerg("Dazed and confused, but trying to continue\n");
236 
237 	/* Clear and disable the PCI SERR error line. */
238 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
239 	outb(reason, NMI_REASON_PORT);
240 }
241 
242 static __kprobes void
243 io_check_error(unsigned char reason, struct pt_regs *regs)
244 {
245 	unsigned long i;
246 
247 	/* check to see if anyone registered against these types of errors */
248 	if (nmi_handle(NMI_IO_CHECK, regs, false))
249 		return;
250 
251 	pr_emerg(
252 	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
253 		 reason, smp_processor_id());
254 	show_regs(regs);
255 
256 	if (panic_on_io_nmi)
257 		panic("NMI IOCK error: Not continuing");
258 
259 	/* Re-enable the IOCK line, wait for a few seconds (20000 * 100us ~= 2s) */
260 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
261 	outb(reason, NMI_REASON_PORT);
262 
263 	i = 20000;
264 	while (--i) {
265 		touch_nmi_watchdog();
266 		udelay(100);
267 	}
268 
269 	reason &= ~NMI_REASON_CLEAR_IOCHK;
270 	outb(reason, NMI_REASON_PORT);
271 }
272 
273 static __kprobes void
274 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
275 {
276 	int handled;
277 
278 	/*
279 	 * Use 'false' as back-to-back NMIs are dealt with one level up.
280 	 * Of course this makes having multiple 'unknown' handlers useless
281 	 * as only the first one is ever run (unless it can actually determine
282 	 * if it caused the NMI).
283 	 */
284 	handled = nmi_handle(NMI_UNKNOWN, regs, false);
285 	if (handled) {
286 		__this_cpu_add(nmi_stats.unknown, handled);
287 		return;
288 	}
289 
290 	__this_cpu_add(nmi_stats.unknown, 1);
291 
292 	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
293 		 reason, smp_processor_id());
294 
295 	pr_emerg("Do you have a strange power saving mode enabled?\n");
296 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
297 		panic("NMI: Not continuing");
298 
299 	pr_emerg("Dazed and confused, but trying to continue\n");
300 }
301 
302 static DEFINE_PER_CPU(bool, swallow_nmi);
303 static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
304 
305 static __kprobes void default_do_nmi(struct pt_regs *regs)
306 {
307 	unsigned char reason = 0;
308 	int handled;
309 	bool b2b = false;
310 
311 	/*
312 	 * CPU-specific NMI must be processed before non-CPU-specific
313 	 * NMI, otherwise we may lose it, because the CPU-specific
314 	 * NMI cannot be detected/processed on other CPUs.
315 	 */
316 
317 	/*
318 	 * Back-to-back NMIs are interesting because they can either
319 	 * be two NMIs or more than two NMIs (anything over two is dropped
320 	 * due to NMIs being edge-triggered).  If this is the second half
321 	 * of a back-to-back NMI, assume we dropped things and process
322 	 * more handlers.  Otherwise, reset the 'swallow' NMI behaviour.
323 	 */
324 	if (regs->ip == __this_cpu_read(last_nmi_rip))
325 		b2b = true;
326 	else
327 		__this_cpu_write(swallow_nmi, false);
328 
329 	__this_cpu_write(last_nmi_rip, regs->ip);
330 
331 	handled = nmi_handle(NMI_LOCAL, regs, b2b);
332 	__this_cpu_add(nmi_stats.normal, handled);
333 	if (handled) {
334 		/*
335 		 * There are cases when an NMI handler handles multiple
336 		 * events in the current NMI.  One of these events may
337 		 * be queued for the next NMI.  Because the event is
338 		 * already handled, the next NMI will result in an unknown
339 		 * NMI.  Instead, let's flag this for a potential NMI to
340 		 * swallow.
341 		 */
342 		if (handled > 1)
343 			__this_cpu_write(swallow_nmi, true);
344 		return;
345 	}
346 
347 	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
348 	raw_spin_lock(&nmi_reason_lock);
349 	reason = x86_platform.get_nmi_reason();
350 
351 	if (reason & NMI_REASON_MASK) {
352 		if (reason & NMI_REASON_SERR)
353 			pci_serr_error(reason, regs);
354 		else if (reason & NMI_REASON_IOCHK)
355 			io_check_error(reason, regs);
356 #ifdef CONFIG_X86_32
357 		/*
358 		 * Reassert NMI in case it became active
359 		 * meanwhile as it's edge-triggered:
360 		 */
361 		reassert_nmi();
362 #endif
363 		__this_cpu_add(nmi_stats.external, 1);
364 		raw_spin_unlock(&nmi_reason_lock);
365 		return;
366 	}
367 	raw_spin_unlock(&nmi_reason_lock);
368 
369 	/*
370 	 * Only one NMI can be latched at a time.  To handle
371 	 * this we may process multiple nmi handlers at once to
372 	 * cover the case where an NMI is dropped.  The downside
373 	 * to this approach is we may process an NMI prematurely,
374 	 * while its real NMI is sitting latched.  This will cause
375 	 * an unknown NMI on the next run of the NMI processing.
376 	 *
377 	 * We tried to flag that condition above, by setting the
378 	 * swallow_nmi flag when we process more than one event.
379 	 * This condition is also only present on the second half
380 	 * of a back-to-back NMI, so we flag that condition too.
381 	 *
382 	 * If both are true, we assume we already processed this
383 	 * NMI previously and we swallow it.  Otherwise we reset
384 	 * the logic.
385 	 *
386 	 * There are scenarios where we may accidentally swallow
387 	 * a 'real' unknown NMI.  For example, while processing
388 	 * a perf NMI another perf NMI comes in along with a
389 	 * 'real' unknown NMI.  These two NMIs get combined into
390 	 * one (as described above).  When the next NMI gets
391 	 * processed, it will be flagged by perf as handled, but
392 	 * no one will know that there was a 'real' unknown NMI sent
393 	 * also.  As a result it gets swallowed.  Or if the first
394 	 * perf NMI returns two events handled then the second
395 	 * NMI will get eaten by the logic below, again losing a
396 	 * 'real' unknown NMI.  But this is the best we can do
397 	 * for now.
398 	 */
399 	if (b2b && __this_cpu_read(swallow_nmi))
400 		__this_cpu_add(nmi_stats.swallow, 1);
401 	else
402 		unknown_nmi_error(reason, regs);
403 }
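
/*
 * Illustrative sequence for the swallow logic above (a hypothetical perf
 * scenario, restating the comment in default_do_nmi()): NMI #1 arrives and
 * perf reports two events handled, so swallow_nmi is set; NMI #2 then hits
 * at the same RIP, so b2b is true; no handler claims it, and instead of
 * being reported as an unknown NMI it is only counted in nmi_stats.swallow.
 */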
404 
405 /*
406  * An NMI can hit a breakpoint, which will cause it to lose its
407  * NMI context with the CPU when the breakpoint handler does an iret.
408  */
409 #ifdef CONFIG_X86_32
410 /*
411  * For i386, NMIs use the same stack as the kernel, and we can
412  * add a workaround to the iret problem in C (preventing nested
413  * NMIs if an NMI takes a trap). Simply have 3 states the NMI
414  * can be in:
415  *
416  *  1) not running
417  *  2) executing
418  *  3) latched
419  *
420  * When no NMI is in progress, it is in the "not running" state.
421  * When an NMI comes in, it goes into the "executing" state.
422  * Normally, if another NMI is triggered, it does not interrupt
423  * the running NMI and the HW will simply latch it so that when
424  * the first NMI finishes, it will restart the second NMI.
425  * (Note, the latch is binary, thus multiple NMIs triggering,
426  *  when one is running, are ignored. Only one NMI is restarted.)
427  *
428  * If an NMI hits a breakpoint that executes an iret, another
429  * NMI can preempt it. We do not want to allow this new NMI
430  * to run, but we want to execute it when the first one finishes.
431  * We set the state to "latched", and the exit of the first NMI will
432  * perform a dec_return, if the result is zero (NOT_RUNNING), then
433  * it will simply exit the NMI handler. If not, the dec_return
434  * would have set the state to NMI_EXECUTING (what we want it to
435  * be when we are running). In this case, we simply jump back
436  * to rerun the NMI handler again, and restart the 'latched' NMI.
437  *
438  * No trap (breakpoint or page fault) should be hit before nmi_restart,
439  * thus there is no race between the first check of state for NOT_RUNNING
440  * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
441  * at this point.
442  *
443  * In case the NMI takes a page fault, we need to save off the CR2,
444  * because the NMI could have preempted another page fault and would
445  * corrupt the CR2 that is about to be read. As nested NMIs must be
446  * restarted and cannot take breakpoints or page faults, the update of
447  * the CR2 must be done before converting the NMI state back to
448  * NOT_RUNNING. Otherwise, there would be a race where another nested NMI
449  * comes in after setting state to NOT_RUNNING but before updating nmi_cr2.
450  */
451 enum nmi_states {
452 	NMI_NOT_RUNNING = 0,
453 	NMI_EXECUTING,
454 	NMI_LATCHED,
455 };
456 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
457 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
458 
459 #define nmi_nesting_preprocess(regs)					\
460 	do {								\
461 		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
462 			this_cpu_write(nmi_state, NMI_LATCHED);		\
463 			return;						\
464 		}							\
465 		this_cpu_write(nmi_state, NMI_EXECUTING);		\
466 		this_cpu_write(nmi_cr2, read_cr2());			\
467 	} while (0);							\
468 	nmi_restart:
469 
470 #define nmi_nesting_postprocess()					\
471 	do {								\
472 		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
473 			write_cr2(this_cpu_read(nmi_cr2));		\
474 		if (this_cpu_dec_return(nmi_state))			\
475 			goto nmi_restart;				\
476 	} while (0)
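
/*
 * Illustrative sequence for the state machine above (hypothetical): NMI #1
 * sets NMI_EXECUTING, hits a breakpoint whose iret re-enables NMIs, and
 * NMI #2 arrives; the nmi_nesting_preprocess macro sees a state other than
 * NMI_NOT_RUNNING, sets it to NMI_LATCHED and returns.  When NMI #1
 * finishes, this_cpu_dec_return() yields NMI_EXECUTING (non-zero), so it
 * jumps back to nmi_restart and services the latched NMI.
 */
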
477 #else /* x86_64 */
478 /*
479  * In x86_64 things are a bit more difficult. This has the same problem
480  * where an NMI hitting a breakpoint that calls iret will remove the
481  * NMI context, allowing a nested NMI to enter. What makes this more
482  * difficult is that both NMIs and breakpoints have their own stack.
483  * When a new NMI or breakpoint is executed, the stack is set to a fixed
484  * point. If an NMI is nested, it will have its stack set at that same
485  * fixed address that the first NMI had, and will start corrupting the
486  * stack. This is handled in entry_64.S, but the same problem exists with
487  * the breakpoint stack.
488  *
489  * If a breakpoint is being processed and the debug stack is in use,
490  * and an NMI comes in and also hits a breakpoint, the stack pointer
491  * will be set to the same fixed address as the breakpoint that was
492  * interrupted, corrupting that stack. To handle this case,
493  * check if the stack that was interrupted is the debug stack, and if
494  * so, change the IDT so that new breakpoints will use the current stack
495  * and not switch to the fixed address. On return of the NMI, switch back
496  * to the original IDT.
497  */
498 static DEFINE_PER_CPU(int, update_debug_stack);
499 
500 static inline void nmi_nesting_preprocess(struct pt_regs *regs)
501 {
502 	/*
503 	 * If we interrupted a breakpoint, it is possible that
504 	 * the nmi handler will have breakpoints too. We need to
505 	 * change the IDT such that breakpoints that happen here
506 	 * continue to use the NMI stack.
507 	 */
508 	if (unlikely(is_debug_stack(regs->sp))) {
509 		debug_stack_set_zero();
510 		this_cpu_write(update_debug_stack, 1);
511 	}
512 }
513 
514 static inline void nmi_nesting_postprocess(void)
515 {
516 	if (unlikely(this_cpu_read(update_debug_stack))) {
517 		debug_stack_reset();
518 		this_cpu_write(update_debug_stack, 0);
519 	}
520 }
521 #endif
522 
523 dotraplinkage notrace __kprobes void
524 do_nmi(struct pt_regs *regs, long error_code)
525 {
526 	nmi_nesting_preprocess(regs);
527 
528 	nmi_enter();
529 
530 	inc_irq_stat(__nmi_count);
531 
532 	if (!ignore_nmis)
533 		default_do_nmi(regs);
534 
535 	nmi_exit();
536 
537 	/* On i386, may loop back to preprocess */
538 	nmi_nesting_postprocess();
539 }
540 
541 void stop_nmi(void)
542 {
543 	ignore_nmis++;
544 }
545 
546 void restart_nmi(void)
547 {
548 	ignore_nmis--;
549 }
550 
551 /* reset the back-to-back NMI logic */
552 void local_touch_nmi(void)
553 {
554 	__this_cpu_write(last_nmi_rip, 0);
555 }
556 EXPORT_SYMBOL_GPL(local_touch_nmi);
557