xref: /openbmc/linux/arch/x86/kernel/traps.c (revision 2359ccdd)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>
#include <asm/umip.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

DECLARE_BITMAP(system_vectors, NR_VECTORS);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

/*
 * In IST context, we explicitly disable preemption.  This serves two
 * purposes: it makes it much less likely that we would accidentally
 * schedule in IST context and it will force a warning if we somehow
 * manage to schedule by accident.
 */
void ist_enter(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to begin a non-atomic
	 * section from double_fault.
	 */
	BUG_ON(!on_thread_stack());

	preempt_enable_no_resched();
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_disable();
}

int is_valid_bugaddr(unsigned long addr)
{
	unsigned short ud;

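	/*
	 * BUG()/WARN() sites live only in kernel text, so anything below
	 * TASK_SIZE_MAX cannot be a valid bug address.
	 */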
	if (addr < TASK_SIZE_MAX)
		return 0;

	if (probe_kernel_address((unsigned short *)addr, ud))
		return 0;

	return ud == INSN_UD0 || ud == INSN_UD2;
}

int fixup_bug(struct pt_regs *regs, int trapnr)
{
	if (trapnr != X86_TRAP_UD)
		return 0;

	switch (report_bug(regs->ip, regs)) {
	case BUG_TRAP_TYPE_NONE:
	case BUG_TRAP_TYPE_BUG:
		break;

	case BUG_TRAP_TYPE_WARN:
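		/* A WARN is recoverable: skip the ud2 and resume execution. */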
		regs->ip += LEN_UD2;
		return 1;
	}

	return 0;
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs,	long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On NMI (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
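		/* No useful siginfo to construct: queue a plain kernel-generated signal. */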
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

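	/* The gcc "a ?: b" shorthand falls back to SEND_SIG_PRIV when info is NULL. */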
	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/*
	 * WARN*()s end up here; fix them up before we call the
	 * notifier chain.
	 */
	if (!user_mode(regs) && fixup_bug(regs, trapnr))
		return;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

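/*
 * Each DO_ERROR() invocation below expands to a do_<name>() entry point
 * that forwards to do_error_trap() with the given vector, signal and string.
 */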
DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic(message);
}
#endif

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
#ifdef CONFIG_VMAP_STACK
	unsigned long cr2;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
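		/* 5*8 bytes: the 64-bit hardware IRET frame (RIP, CS, RFLAGS, RSP, SS). */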
		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	cr2 = read_cr2();
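	/*
	 * CR2 within one page below the stack base means we faulted in the
	 * guard page; for addresses above the base the unsigned subtraction
	 * wraps, so only the guard page can satisfy the comparison.
	 */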
	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;
	siginfo_t *info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here means it is in its 'init state' (all zeros),
	 * which indicates MPX was not responsible for the
	 * exception.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

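	/*
	 * With UMIP, instructions such as SGDT/SIDT raise #GP from user
	 * mode; fixup_umip_exception() emulates the ones legacy userspace
	 * still relies on.
	 */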
	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			return;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		return;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);

dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
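	/* text_poke_bp() plants int3s while live-patching kernel text; handle those next. */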
	if (poke_int3_handler(regs))
		return;

	/*
	 * Use ist_enter despite the fact that we don't use an IST stack.
	 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
	 * mode or even during context tracking state changes.
	 *
	 * This means that we can't schedule.  That's okay.
	 */
	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);

exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
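	/*
	 * cpu_current_top_of_stack is the top of the thread stack; the
	 * pt_regs slot for a user-mode entry sits immediately below it.
	 */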
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack *new_stack =
		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the new stack. */
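	/* (Again 5*8 bytes: the hardware IRET frame, RIP through SS.) */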
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
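	/* The unsigned subtract-and-compare below tests ip against [begin, end) in a single branch. */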
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	ist_enter(regs);

	get_debugreg(dr6, 6);
	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	set_debugreg(0, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
		     is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
		/*
		 * else we might have gotten a single-step trap and hit a
		 * watchpoint at the same time, in which case we should fall
		 * through and handle the watchpoint.
		 */
	}

	/*
	 * If DR6 gives us no information about the origin of this trap,
	 * then it is very likely the result of an icebp/int01 trap.
	 * Userspace expects a SIGTRAP for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

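	/* DR6 is passed by reference (cast to long) so notifier callbacks can inspect and modify it. */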
	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		cond_local_irq_disable(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now.  If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * an oops.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs, trapnr)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = error_code;
	info.si_signo		= SIGFPE;
	info.si_errno		= 0;
	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	unsigned long cr0;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

#ifdef CONFIG_MATH_EMULATION
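	/* No FPU hardware and CR0.EM set: emulate the instruction rather than fault. */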
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif

	/* This should not happen. */
	cr0 = read_cr0();
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, error_code);
	}
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	idt_setup_traps();

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
		    PAGE_KERNEL_RO);
	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();

	x86_init.irqs.trap_init();

	idt_setup_debugidt_traps();
}