xref: /openbmc/linux/arch/x86/kernel/traps.c (revision 68198dca)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>
#include <asm/umip.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

DECLARE_BITMAP(system_vectors, NR_VECTORS);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}
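
/*
 * Editor's note: an illustrative sketch (not part of the original file)
 * of the bracketing pattern these helpers support.  A trap handler
 * mirrors the interrupted context: IRQs go on only if they were on when
 * the trap hit, and that state is restored before returning.  The name
 * example_handler is hypothetical.
 */
#if 0
dotraplinkage void example_handler(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);	/* IRQs on only if IF was set */
	/* ... work that may rely on IRQs being enabled ... */
	cond_local_irq_disable(regs);	/* restore the interrupted state */
}
#endif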

/*
 * In IST context, we explicitly disable preemption.  This serves two
 * purposes: it makes it much less likely that we would accidentally
 * schedule in IST context and it will force a warning if we somehow
 * manage to schedule by accident.
 */
void ist_enter(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON(!on_thread_stack());

	preempt_enable_no_resched();
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_disable();
}
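
/*
 * Editor's note: a minimal sketch (not from this file) of how the four
 * helpers above nest, assuming an IST handler that trapped from user
 * mode; the machine-check handler follows this pattern.  The name
 * example_ist_handler is hypothetical.
 */
#if 0
void example_ist_handler(struct pt_regs *regs)
{
	ist_enter(regs);			/* atomic section */
	if (user_mode(regs)) {
		ist_begin_non_atomic(regs);
		local_irq_enable();		/* caller's job, see above */
		/* ... schedulable work, e.g. delivering a signal ... */
		local_irq_disable();
		ist_end_non_atomic();
	}
	ist_exit(regs);
}
#endif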

int is_valid_bugaddr(unsigned long addr)
{
	unsigned short ud;

	if (addr < TASK_SIZE_MAX)
		return 0;

	if (probe_kernel_address((unsigned short *)addr, ud))
		return 0;

	return ud == INSN_UD0 || ud == INSN_UD2;
}

int fixup_bug(struct pt_regs *regs, int trapnr)
{
	if (trapnr != X86_TRAP_UD)
		return 0;

	switch (report_bug(regs->ip, regs)) {
	case BUG_TRAP_TYPE_NONE:
	case BUG_TRAP_TYPE_BUG:
		break;

	case BUG_TRAP_TYPE_WARN:
		regs->ip += LEN_UD0;
		return 1;
	}

	return 0;
}
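
/*
 * Editor's note (illustrative): on x86, WARN() is implemented as a ud0
 * instruction plus a __bug_table entry.  The resulting #UD reaches
 * fixup_bug() via do_error_trap(); report_bug() prints the warning and
 * returns BUG_TRAP_TYPE_WARN, and "regs->ip += LEN_UD0" skips the
 * 2-byte ud0 opcode so the task resumes at the next instruction.
 * BUG() differs only in that report_bug() returns BUG_TRAP_TYPE_BUG,
 * so we fall through and eventually die().
 */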

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs,	long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/*
	 * WARN*()s end up here; fix them up before we call the
	 * notifier chain.
	 */
	if (!user_mode(regs) && fixup_bug(regs, trapnr))
		return;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
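
/*
 * Editor's note: for reference, the first DO_ERROR() line above expands
 * (modulo whitespace) to the handler sketched below.
 */
#if 0
dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
{
	do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
}
#endif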

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic(message);
}
#endif

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
#ifdef CONFIG_VMAP_STACK
	unsigned long cr2;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	cr2 = read_cr2();
	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif
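
/*
 * Editor's note: a worked example of the guard-page test above, with
 * made-up addresses.  Take PAGE_SIZE == 4096 and a stack base
 * (task_stack_page()) of 0xffffc9000000c000, so the guard page spans
 * 0xffffc9000000b000..0xffffc9000000bfff.  For cr2 == 0xffffc9000000bff8:
 *
 *	base - 1 - cr2 == 0xffffc9000000bfff - 0xffffc9000000bff8
 *		       == 7 < PAGE_SIZE		-> stack overflow
 *
 * For any cr2 at or above the stack base the unsigned subtraction wraps
 * to a huge value, so the comparison fails and we fall through to die().
 */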

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;
	siginfo_t *info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here might mean that it is in its 'init state',
	 * which is all zeros, indicating that MPX was not
	 * responsible for the exception.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates
	 * status information of a bound range exception (#BR) or an
	 * operation involving the bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}
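
/*
 * Editor's note (illustrative): BNDSTATUS packs the address of the
 * faulting bound directory entry in its upper bits and a 2-bit error
 * code in its low bits, which is why the switch above masks with
 * MPX_BNDSTA_ERROR_CODE.  A hypothetical stand-alone decode
 * (example_decode_bndstatus is not a kernel function):
 */
#if 0
static void example_decode_bndstatus(u64 bndstatus)
{
	switch (bndstatus & MPX_BNDSTA_ERROR_CODE) {	/* low two bits */
	case 1:
		pr_info("MPX: bound violation\n");
		break;
	case 2:
		pr_info("MPX: invalid bound directory entry\n");
		break;
	}
}
#endif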

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			return;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		return;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);
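
/*
 * Editor's note: X86_FEATURE_UMIP makes user-space execution of SGDT,
 * SIDT, SLDT, SMSW and STR raise #GP(0).  fixup_umip_exception()
 * (asm/umip.h) transparently emulates the instructions it supports,
 * handing back dummy table values so legacy user code keeps running
 * instead of getting SIGSEGV; that is why it is tried first, before
 * the vm86 and kernel-mode paths above.
 */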

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif
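
/*
 * Editor's note: the 5*8 above (and in the espfix64 path of
 * do_double_fault()) is the 64-bit hardware exception frame, i.e. the
 * five quadwords the CPU pushed for the trapped IRET.  They sit at the
 * end of struct pt_regs, so copying 40 bytes starting at &regs.ip
 * moves, from low to high address:
 *
 *	RIP, CS, RFLAGS, RSP, SS
 */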

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
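
/*
 * Editor's note: the unsigned "(ip - start) < (end - start)" form above
 * is the classic single-comparison range check: if ip is below start,
 * the subtraction wraps to a huge value and the test fails, so no
 * separate lower-bound comparison is needed.  An equivalent, more
 * explicit sketch (example_in_range is hypothetical):
 */
#if 0
static bool example_in_range(unsigned long ip,
			     unsigned long start, unsigned long end)
{
	return ip >= start && ip < end;	/* same result, two compares */
}
#endif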

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	ist_enter(regs);

	get_debugreg(dr6, 6);
	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	set_debugreg(0, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
		     is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
		/*
		 * else we might have gotten a single-step trap and hit a
		 * watchpoint at the same time, in which case we should fall
		 * through and handle the watchpoint.
		 */
	}

	/*
	 * If DR6 tells us nothing about the origin of this trap, it is
	 * most likely the result of an icebp/int01 trap.  Userspace
	 * expects a SIGTRAP for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		cond_local_irq_disable(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now.  If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * an oops.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();

exit:
#if defined(CONFIG_X86_32)
	/*
	 * This is the most likely code path that involves non-trivial use
	 * of the SYSENTER stack.  Check that we haven't overrun it.
	 */
	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
	     "Overran or corrupted SYSENTER stack\n");
#endif
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);
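
/*
 * Editor's note on the DR6 bits consumed above (see asm/debugreg.h;
 * summarized from the SDM): B0-B3 (DR_TRAP_BITS) flag which hardware
 * breakpoint fired, BS (DR_STEP) flags a single-step trap, and the
 * bits covered by DR6_RESERVED always read as 1, which is why they are
 * masked off before the value is stashed in thread.debugreg6.
 */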

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = error_code;
	info.si_signo		= SIGFPE;
	info.si_errno		= 0;
	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	unsigned long cr0;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif

	/* This should not happen. */
	cr0 = read_cr0();
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, error_code);
	}
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
}
#endif

void __init trap_init(void)
{
	idt_setup_traps();

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();

	x86_init.irqs.trap_init();

	idt_setup_debugidt_traps();
}
953