xref: /openbmc/linux/arch/x86/kernel/signal.c (revision c4a11bf4)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1991, 1992  Linus Torvalds
4  *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
5  *
6  *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
7  *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
8  *  2000-2002   x86-64 support by Andi Kleen
9  */
10 
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/sched.h>
14 #include <linux/sched/task_stack.h>
15 #include <linux/mm.h>
16 #include <linux/smp.h>
17 #include <linux/kernel.h>
18 #include <linux/kstrtox.h>
19 #include <linux/errno.h>
20 #include <linux/wait.h>
21 #include <linux/tracehook.h>
22 #include <linux/unistd.h>
23 #include <linux/stddef.h>
24 #include <linux/personality.h>
25 #include <linux/uaccess.h>
26 #include <linux/user-return-notifier.h>
27 #include <linux/uprobes.h>
28 #include <linux/context_tracking.h>
29 #include <linux/entry-common.h>
30 #include <linux/syscalls.h>
31 
32 #include <asm/processor.h>
33 #include <asm/ucontext.h>
34 #include <asm/fpu/signal.h>
35 #include <asm/fpu/xstate.h>
36 #include <asm/vdso.h>
37 #include <asm/mce.h>
38 #include <asm/sighandling.h>
39 #include <asm/vm86.h>
40 
41 #ifdef CONFIG_X86_64
42 #include <linux/compat.h>
43 #include <asm/proto.h>
44 #include <asm/ia32_unistd.h>
45 #include <asm/fpu/xstate.h>
46 #endif /* CONFIG_X86_64 */
47 
48 #include <asm/syscall.h>
49 #include <asm/sigframe.h>
50 #include <asm/signal.h>
51 
52 #ifdef CONFIG_X86_64
53 /*
54  * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
55  * alone.  Using this generally makes no sense unless
56  * user_64bit_mode(regs) would return true.
57  */
58 static void force_valid_ss(struct pt_regs *regs)
59 {
60 	u32 ar;
	/*
	 * LAR loads the access-rights byte for the selector and sets ZF on
	 * success; when the selector is unusable we synthesize ar == 0 so
	 * the check below replaces SS.
	 */
61 	asm volatile ("lar %[old_ss], %[ar]\n\t"
62 		      "jz 1f\n\t"		/* If invalid: */
63 		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
64 		      "1:"
65 		      : [ar] "=r" (ar)
66 		      : [old_ss] "rm" ((u16)regs->ss));
67 
68 	/*
69 	 * For a valid 64-bit user context, we need DPL 3, type
70 	 * read-write data or read-write exp-down data, and S and P
71 	 * set.  We can't use VERW because VERW doesn't check the
72 	 * P bit.
73 	 */
74 	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
75 	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
76 	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
77 		regs->ss = __USER_DS;
78 }
79 # define CONTEXT_COPY_SIZE	offsetof(struct sigcontext, reserved1)
80 #else
81 # define CONTEXT_COPY_SIZE	sizeof(struct sigcontext)
82 #endif
83 
/*
 * Restore user register state from a sigcontext during sigreturn.
 * Copies CONTEXT_COPY_SIZE bytes from @usc, writes the saved GP registers
 * back into @regs, forces CPL3 on CS/SS, and restores the FPU state from
 * sc.fpstate.  Returns true on success, false on any fault.
 */
84 static bool restore_sigcontext(struct pt_regs *regs,
85 			       struct sigcontext __user *usc,
86 			       unsigned long uc_flags)
87 {
88 	struct sigcontext sc;
89 
90 	/* Always make any pending restarted system calls return -EINTR */
91 	current->restart_block.fn = do_no_restart_syscall;
92 
93 	if (copy_from_user(&sc, usc, CONTEXT_COPY_SIZE))
94 		return false;
95 
96 #ifdef CONFIG_X86_32
97 	set_user_gs(regs, sc.gs);
98 	regs->fs = sc.fs;
99 	regs->es = sc.es;
100 	regs->ds = sc.ds;
101 #endif /* CONFIG_X86_32 */
102 
103 	regs->bx = sc.bx;
104 	regs->cx = sc.cx;
105 	regs->dx = sc.dx;
106 	regs->si = sc.si;
107 	regs->di = sc.di;
108 	regs->bp = sc.bp;
109 	regs->ax = sc.ax;
110 	regs->sp = sc.sp;
111 	regs->ip = sc.ip;
112 
113 #ifdef CONFIG_X86_64
114 	regs->r8 = sc.r8;
115 	regs->r9 = sc.r9;
116 	regs->r10 = sc.r10;
117 	regs->r11 = sc.r11;
118 	regs->r12 = sc.r12;
119 	regs->r13 = sc.r13;
120 	regs->r14 = sc.r14;
121 	regs->r15 = sc.r15;
122 #endif /* CONFIG_X86_64 */
123 
124 	/* Get CS/SS and force CPL3 */
125 	regs->cs = sc.cs | 0x03;
126 	regs->ss = sc.ss | 0x03;
127 
	/* Only the FIX_EFLAGS bits may be taken from userspace. */
128 	regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS);
129 	/* disable syscall checks */
130 	regs->orig_ax = -1;
131 
132 #ifdef CONFIG_X86_64
133 	/*
134 	 * Fix up SS if needed for the benefit of old DOSEMU and
135 	 * CRIU.
136 	 */
137 	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
138 		force_valid_ss(regs);
139 #endif
140 
141 	return fpu__restore_sig((void __user *)sc.fpstate,
142 			       IS_ENABLED(CONFIG_X86_32));
143 }
144 
/*
 * Fill in a user-space sigcontext from the current register state.
 * Caller must be inside a user_access_begin()/user_access_end() section;
 * any faulting unsafe_put_user() jumps to the local Efault label and the
 * function returns -EFAULT, 0 on success.
 */
145 static __always_inline int
146 __unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
147 		     struct pt_regs *regs, unsigned long mask)
148 {
149 #ifdef CONFIG_X86_32
150 	unsafe_put_user(get_user_gs(regs),
151 				  (unsigned int __user *)&sc->gs, Efault);
152 	unsafe_put_user(regs->fs, (unsigned int __user *)&sc->fs, Efault);
153 	unsafe_put_user(regs->es, (unsigned int __user *)&sc->es, Efault);
154 	unsafe_put_user(regs->ds, (unsigned int __user *)&sc->ds, Efault);
155 #endif /* CONFIG_X86_32 */
156 
157 	unsafe_put_user(regs->di, &sc->di, Efault);
158 	unsafe_put_user(regs->si, &sc->si, Efault);
159 	unsafe_put_user(regs->bp, &sc->bp, Efault);
160 	unsafe_put_user(regs->sp, &sc->sp, Efault);
161 	unsafe_put_user(regs->bx, &sc->bx, Efault);
162 	unsafe_put_user(regs->dx, &sc->dx, Efault);
163 	unsafe_put_user(regs->cx, &sc->cx, Efault);
164 	unsafe_put_user(regs->ax, &sc->ax, Efault);
165 #ifdef CONFIG_X86_64
166 	unsafe_put_user(regs->r8, &sc->r8, Efault);
167 	unsafe_put_user(regs->r9, &sc->r9, Efault);
168 	unsafe_put_user(regs->r10, &sc->r10, Efault);
169 	unsafe_put_user(regs->r11, &sc->r11, Efault);
170 	unsafe_put_user(regs->r12, &sc->r12, Efault);
171 	unsafe_put_user(regs->r13, &sc->r13, Efault);
172 	unsafe_put_user(regs->r14, &sc->r14, Efault);
173 	unsafe_put_user(regs->r15, &sc->r15, Efault);
174 #endif /* CONFIG_X86_64 */
175 
	/* Report the trap that caused this signal, if any. */
176 	unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault);
177 	unsafe_put_user(current->thread.error_code, &sc->err, Efault);
178 	unsafe_put_user(regs->ip, &sc->ip, Efault);
179 #ifdef CONFIG_X86_32
180 	unsafe_put_user(regs->cs, (unsigned int __user *)&sc->cs, Efault);
181 	unsafe_put_user(regs->flags, &sc->flags, Efault);
182 	unsafe_put_user(regs->sp, &sc->sp_at_signal, Efault);
183 	unsafe_put_user(regs->ss, (unsigned int __user *)&sc->ss, Efault);
184 #else /* !CONFIG_X86_32 */
185 	unsafe_put_user(regs->flags, &sc->flags, Efault);
186 	unsafe_put_user(regs->cs, &sc->cs, Efault);
187 	unsafe_put_user(0, &sc->gs, Efault);
188 	unsafe_put_user(0, &sc->fs, Efault);
189 	unsafe_put_user(regs->ss, &sc->ss, Efault);
190 #endif /* CONFIG_X86_32 */
191 
192 	unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault);
193 
194 	/* non-iBCS2 extensions.. */
195 	unsafe_put_user(mask, &sc->oldmask, Efault);
196 	unsafe_put_user(current->thread.cr2, &sc->cr2, Efault);
197 	return 0;
198 Efault:
199 	return -EFAULT;
200 }
201 
/* Write the sigcontext; on fault, jump to @label in the caller. */
202 #define unsafe_put_sigcontext(sc, fp, regs, set, label)			\
203 do {									\
204 	if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0]))	\
205 		goto label;						\
206 } while(0);
207 
/* Store the full 64-bit blocked-signal mask into the frame's ucontext. */
208 #define unsafe_put_sigmask(set, frame, label) \
209 	unsafe_put_user(*(__u64 *)(set), \
210 			(__u64 __user *)&(frame)->uc.uc_sigmask, \
211 			label)
212 
213 /*
214  * Set up a signal frame.
215  */
216 
217 /* x86 ABI requires 16-byte alignment */
218 #define FRAME_ALIGNMENT	16UL
219 
220 #define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)
221 
222 /*
223  * Determine which stack to use..
224  */
/*
 * Round a candidate signal-frame stack pointer down so that the handler
 * sees an ABI-conformant stack alignment on entry.
 */
225 static unsigned long align_sigframe(unsigned long sp)
226 {
227 #ifdef CONFIG_X86_32
228 	/*
229 	 * Align the stack pointer according to the i386 ABI,
230 	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
231 	 */
232 	sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
233 #else /* !CONFIG_X86_32 */
	/* 64-bit: make ((sp + 8) & 15) == 0, i.e. pre-call alignment. */
234 	sp = round_down(sp, FRAME_ALIGNMENT) - 8;
235 #endif
236 	return sp;
237 }
238 
/*
 * Pick the user stack address for a new signal frame of @frame_size bytes,
 * honoring SA_ONSTACK / sigaltstack, and reserve + write the FPU state
 * just above it (*fpstate).  Returns the frame address, or an always-bogus
 * (void __user *)-1L so that delivery faults with SIGSEGV.
 */
239 static void __user *
240 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
241 	     void __user **fpstate)
242 {
243 	/* Default to using normal stack */
244 	bool nested_altstack = on_sig_stack(regs->sp);
245 	bool entering_altstack = false;
246 	unsigned long math_size = 0;
247 	unsigned long sp = regs->sp;
248 	unsigned long buf_fx = 0;
249 
	/* redzone: skip the 128 bytes below sp that the 64-bit ABI reserves */
250 	/* redzone */
251 	if (IS_ENABLED(CONFIG_X86_64))
252 		sp -= 128;
253 
254 	/* This is the X/Open sanctioned signal stack switching.  */
255 	if (ka->sa.sa_flags & SA_ONSTACK) {
256 		/*
257 		 * This checks nested_altstack via sas_ss_flags(). Sensible
258 		 * programs use SS_AUTODISARM, which disables that check, and
259 		 * programs that don't use SS_AUTODISARM get compatible.
260 		 */
261 		if (sas_ss_flags(sp) == 0) {
262 			sp = current->sas_ss_sp + current->sas_ss_size;
263 			entering_altstack = true;
264 		}
265 	} else if (IS_ENABLED(CONFIG_X86_32) &&
266 		   !nested_altstack &&
267 		   regs->ss != __USER_DS &&
268 		   !(ka->sa.sa_flags & SA_RESTORER) &&
269 		   ka->sa.sa_restorer) {
270 		/* This is the legacy signal stack switching. */
271 		sp = (unsigned long) ka->sa.sa_restorer;
272 		entering_altstack = true;
273 	}
274 
	/* Carve out space for the (f)xsave area; buf_fx is its aligned start. */
275 	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
276 				  &buf_fx, &math_size);
277 	*fpstate = (void __user *)sp;
278 
279 	sp = align_sigframe(sp - frame_size);
280 
281 	/*
282 	 * If we are on the alternate signal stack and would overflow it, don't.
283 	 * Return an always-bogus address instead so we will die with SIGSEGV.
284 	 */
285 	if (unlikely((nested_altstack || entering_altstack) &&
286 		     !__on_sig_stack(sp))) {
287 
288 		if (show_unhandled_signals && printk_ratelimit())
289 			pr_info("%s[%d] overflowed sigaltstack\n",
290 				current->comm, task_pid_nr(current));
291 
292 		return (void __user *)-1L;
293 	}
294 
295 	/* save i387 and extended state */
296 	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size))
297 		return (void __user *)-1L;
298 
299 	return (void __user *)sp;
300 }
301 
302 #ifdef CONFIG_X86_32
/*
 * Legacy int $0x80 sigreturn trampolines, copied into the frame's retcode
 * field.  They are no longer executed (the vDSO restorer is used instead);
 * they remain because gdb matches these byte sequences to recognize signal
 * frames -- see the comments at the unsafe_put_user() copy sites below.
 */
303 static const struct {
304 	u16 poplmovl;
305 	u32 val;
306 	u16 int80;
307 } __attribute__((packed)) retcode = {
308 	0xb858,		/* popl %eax; movl $..., %eax */
309 	__NR_sigreturn,
310 	0x80cd,		/* int $0x80 */
311 };
312 
313 static const struct {
314 	u8  movl;
315 	u32 val;
316 	u16 int80;
317 	u8  pad;
318 } __attribute__((packed)) rt_retcode = {
319 	0xb8,		/* movl $..., %eax */
320 	__NR_rt_sigreturn,
321 	0x80cd,		/* int $0x80 */
322 	0
323 };
324 
/*
 * Build a non-RT (legacy) 32-bit signal frame on the user stack and point
 * regs at the handler.  Returns 0 on success, -EFAULT on any fault.
 */
325 static int
326 __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
327 	      struct pt_regs *regs)
328 {
329 	struct sigframe __user *frame;
330 	void __user *restorer;
331 	void __user *fp = NULL;
332 
333 	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);
334 
335 	if (!user_access_begin(frame, sizeof(*frame)))
336 		return -EFAULT;
337 
338 	unsafe_put_user(sig, &frame->sig, Efault);
339 	unsafe_put_sigcontext(&frame->sc, fp, regs, set, Efault);
	/* Legacy frames carry the upper half of the sigmask separately. */
340 	unsafe_put_user(set->sig[1], &frame->extramask[0], Efault);
341 	if (current->mm->context.vdso)
342 		restorer = current->mm->context.vdso +
343 			vdso_image_32.sym___kernel_sigreturn;
344 	else
345 		restorer = &frame->retcode;
346 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
347 		restorer = ksig->ka.sa.sa_restorer;
348 
349 	/* Set up to return from userspace.  */
350 	unsafe_put_user(restorer, &frame->pretcode, Efault);
351 
352 	/*
353 	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
354 	 *
355 	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
356 	 * reasons and because gdb uses it as a signature to notice
357 	 * signal handler stack frames.
358 	 */
359 	unsafe_put_user(*((u64 *)&retcode), (u64 *)frame->retcode, Efault);
360 	user_access_end();
361 
362 	/* Set up registers for signal handler */
363 	regs->sp = (unsigned long)frame;
364 	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
365 	regs->ax = (unsigned long)sig;
366 	regs->dx = 0;
367 	regs->cx = 0;
368 
369 	regs->ds = __USER_DS;
370 	regs->es = __USER_DS;
371 	regs->ss = __USER_DS;
372 	regs->cs = __USER_CS;
373 
374 	return 0;
375 
376 Efault:
377 	user_access_end();
378 	return -EFAULT;
379 }
380 
/*
 * Build a 32-bit RT signal frame (siginfo + ucontext) on the user stack
 * and point regs at the handler.  Returns 0 on success, -EFAULT on fault.
 */
381 static int __setup_rt_frame(int sig, struct ksignal *ksig,
382 			    sigset_t *set, struct pt_regs *regs)
383 {
384 	struct rt_sigframe __user *frame;
385 	void __user *restorer;
386 	void __user *fp = NULL;
387 
388 	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);
389 
390 	if (!user_access_begin(frame, sizeof(*frame)))
391 		return -EFAULT;
392 
393 	unsafe_put_user(sig, &frame->sig, Efault);
394 	unsafe_put_user(&frame->info, &frame->pinfo, Efault);
395 	unsafe_put_user(&frame->uc, &frame->puc, Efault);
396 
397 	/* Create the ucontext.  */
398 	if (static_cpu_has(X86_FEATURE_XSAVE))
399 		unsafe_put_user(UC_FP_XSTATE, &frame->uc.uc_flags, Efault);
400 	else
401 		unsafe_put_user(0, &frame->uc.uc_flags, Efault);
402 	unsafe_put_user(0, &frame->uc.uc_link, Efault);
403 	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
404 
405 	/* Set up to return from userspace.  */
406 	restorer = current->mm->context.vdso +
407 		vdso_image_32.sym___kernel_rt_sigreturn;
408 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
409 		restorer = ksig->ka.sa.sa_restorer;
410 	unsafe_put_user(restorer, &frame->pretcode, Efault);
411 
412 	/*
413 	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
414 	 *
415 	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
416 	 * reasons and because gdb uses it as a signature to notice
417 	 * signal handler stack frames.
418 	 */
419 	unsafe_put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode, Efault);
420 	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
421 	unsafe_put_sigmask(set, frame, Efault);
422 	user_access_end();
423 
	/* siginfo copy may fault/sleep, so do it outside the unsafe region. */
424 	if (copy_siginfo_to_user(&frame->info, &ksig->info))
425 		return -EFAULT;
426 
427 	/* Set up registers for signal handler */
428 	regs->sp = (unsigned long)frame;
429 	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
430 	regs->ax = (unsigned long)sig;
431 	regs->dx = (unsigned long)&frame->info;
432 	regs->cx = (unsigned long)&frame->uc;
433 
434 	regs->ds = __USER_DS;
435 	regs->es = __USER_DS;
436 	regs->ss = __USER_DS;
437 	regs->cs = __USER_CS;
438 
439 	return 0;
440 Efault:
441 	user_access_end();
442 	return -EFAULT;
443 }
444 #else /* !CONFIG_X86_32 */
/*
 * Compute the uc_flags word for a 64-bit/x32 frame: advertise XSAVE-format
 * FP state when available, always advertise saved SS, and request strict
 * SS restore only for genuine 64-bit contexts (see restore_sigcontext()).
 */
445 static unsigned long frame_uc_flags(struct pt_regs *regs)
446 {
447 	unsigned long flags;
448 
449 	if (boot_cpu_has(X86_FEATURE_XSAVE))
450 		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
451 	else
452 		flags = UC_SIGCONTEXT_SS;
453 
454 	if (likely(user_64bit_mode(regs)))
455 		flags |= UC_STRICT_RESTORE_SS;
456 
457 	return flags;
458 }
459 
/*
 * Build the 64-bit RT signal frame on the user stack and point regs at the
 * handler.  Returns 0 on success, -EFAULT on fault or missing SA_RESTORER.
 */
460 static int __setup_rt_frame(int sig, struct ksignal *ksig,
461 			    sigset_t *set, struct pt_regs *regs)
462 {
463 	struct rt_sigframe __user *frame;
464 	void __user *fp = NULL;
465 	unsigned long uc_flags;
466 
467 	/* x86-64 should always use SA_RESTORER. */
468 	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
469 		return -EFAULT;
470 
471 	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
472 	uc_flags = frame_uc_flags(regs);
473 
474 	if (!user_access_begin(frame, sizeof(*frame)))
475 		return -EFAULT;
476 
477 	/* Create the ucontext.  */
478 	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
479 	unsafe_put_user(0, &frame->uc.uc_link, Efault);
480 	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
481 
482 	/* Set up to return from userspace.  If provided, use a stub
483 	   already in userspace.  */
484 	unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault);
485 	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
486 	unsafe_put_sigmask(set, frame, Efault);
487 	user_access_end();
488 
489 	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
490 		if (copy_siginfo_to_user(&frame->info, &ksig->info))
491 			return -EFAULT;
492 	}
493 
494 	/* Set up registers for signal handler */
495 	regs->di = sig;
496 	/* In case the signal handler was declared without prototypes */
497 	regs->ax = 0;
498 
499 	/* This also works for non SA_SIGINFO handlers because they expect the
500 	   next argument after the signal number on the stack. */
501 	regs->si = (unsigned long)&frame->info;
502 	regs->dx = (unsigned long)&frame->uc;
503 	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;
504 
505 	regs->sp = (unsigned long)frame;
506 
507 	/*
508 	 * Set up the CS and SS registers to run signal handlers in
509 	 * 64-bit mode, even if the handler happens to be interrupting
510 	 * 32-bit or 16-bit code.
511 	 *
512 	 * SS is subtle.  In 64-bit mode, we don't need any particular
513 	 * SS descriptor, but we do need SS to be valid.  It's possible
514 	 * that the old SS is entirely bogus -- this can happen if the
515 	 * signal we're trying to deliver is #GP or #SS caused by a bad
516 	 * SS value.  We also have a compatibility issue here: DOSEMU
517 	 * relies on the contents of the SS register indicating the
518 	 * SS value at the time of the signal, even though that code in
519 	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
520 	 * avoids relying on sigreturn to restore SS; instead it uses
521 	 * a trampoline.)  So we do our best: if the old SS was valid,
522 	 * we keep it.  Otherwise we replace it.
523 	 */
524 	regs->cs = __USER_CS;
525 
526 	if (unlikely(regs->ss != __USER_DS))
527 		force_valid_ss(regs);
528 
529 	return 0;
530 
531 Efault:
532 	user_access_end();
533 	return -EFAULT;
534 }
535 #endif /* CONFIG_X86_32 */
536 
537 #ifdef CONFIG_X86_X32_ABI
/*
 * Copy a kernel siginfo to userspace in x32 layout.  x32 differs from
 * ia32 only in the SIGCHLD utime/stime fields, which are 64-bit here.
 * Returns 0 on success, -EFAULT on fault.
 */
538 static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to,
539 		const struct kernel_siginfo *from)
540 {
541 	struct compat_siginfo new;
542 
543 	copy_siginfo_to_external32(&new, from);
544 	if (from->si_signo == SIGCHLD) {
545 		new._sifields._sigchld_x32._utime = from->si_utime;
546 		new._sifields._sigchld_x32._stime = from->si_stime;
547 	}
548 	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
549 		return -EFAULT;
550 	return 0;
551 }
552 
/* Dispatch compat siginfo copy-out to the x32 or ia32 variant. */
553 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
554 			   const struct kernel_siginfo *from)
555 {
556 	if (in_x32_syscall())
557 		return x32_copy_siginfo_to_user(to, from);
558 	return __copy_siginfo_to_user32(to, from);
559 }
560 #endif /* CONFIG_X86_X32_ABI */
561 
/*
 * Build an x32 RT signal frame and point regs at the handler.  When
 * CONFIG_X86_X32_ABI is off this compiles to a stub that returns 0.
 * Returns 0 on success, -EFAULT on fault or missing SA_RESTORER.
 */
562 static int x32_setup_rt_frame(struct ksignal *ksig,
563 			      compat_sigset_t *set,
564 			      struct pt_regs *regs)
565 {
566 #ifdef CONFIG_X86_X32_ABI
567 	struct rt_sigframe_x32 __user *frame;
568 	unsigned long uc_flags;
569 	void __user *restorer;
570 	void __user *fp = NULL;
571 
572 	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
573 		return -EFAULT;
574 
575 	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fp);
576 
577 	uc_flags = frame_uc_flags(regs);
578 
579 	if (!user_access_begin(frame, sizeof(*frame)))
580 		return -EFAULT;
581 
582 	/* Create the ucontext.  */
583 	unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault);
584 	unsafe_put_user(0, &frame->uc.uc_link, Efault);
585 	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
586 	unsafe_put_user(0, &frame->uc.uc__pad0, Efault);
587 	restorer = ksig->ka.sa.sa_restorer;
588 	unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault);
589 	unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault);
590 	unsafe_put_sigmask(set, frame, Efault);
591 	user_access_end();
592 
593 	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
594 		if (x32_copy_siginfo_to_user(&frame->info, &ksig->info))
595 			return -EFAULT;
596 	}
597 
598 	/* Set up registers for signal handler */
599 	regs->sp = (unsigned long) frame;
600 	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;
601 
602 	/* We use the x32 calling convention here... */
603 	regs->di = ksig->sig;
604 	regs->si = (unsigned long) &frame->info;
605 	regs->dx = (unsigned long) &frame->uc;
606 
607 	loadsegment(ds, __USER_DS);
608 	loadsegment(es, __USER_DS);
609 
610 	regs->cs = __USER_CS;
611 	regs->ss = __USER_DS;
612 #endif	/* CONFIG_X86_X32_ABI */
613 
614 	return 0;
615 #ifdef CONFIG_X86_X32_ABI
616 Efault:
617 	user_access_end();
618 	return -EFAULT;
619 #endif
620 }
621 
622 /*
623  * Do a signal return; undo the signal stack.
624  */
625 #ifdef CONFIG_X86_32
/*
 * Legacy (non-RT) 32-bit sigreturn: restore the blocked-signal mask and
 * register state from the frame that __setup_frame() built.
 */
626 SYSCALL_DEFINE0(sigreturn)
627 {
628 	struct pt_regs *regs = current_pt_regs();
629 	struct sigframe __user *frame;
630 	sigset_t set;
631 
	/*
	 * sp - 8 backs up over the words the trampoline consumed before
	 * int $0x80 -- presumably pretcode and the sig argument; TODO
	 * confirm against struct sigframe's layout.
	 */
632 	frame = (struct sigframe __user *)(regs->sp - 8);
633 
634 	if (!access_ok(frame, sizeof(*frame)))
635 		goto badframe;
636 	if (__get_user(set.sig[0], &frame->sc.oldmask) ||
637 	    __get_user(set.sig[1], &frame->extramask[0]))
638 		goto badframe;
639 
640 	set_current_blocked(&set);
641 
642 	/*
643 	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
644 	 * Save a few cycles by skipping the __get_user.
645 	 */
646 	if (!restore_sigcontext(regs, &frame->sc, 0))
647 		goto badframe;
	/* Return the restored ax so it isn't clobbered by the syscall path. */
648 	return regs->ax;
649 
650 badframe:
651 	signal_fault(regs, frame, "sigreturn");
652 
653 	return 0;
654 }
655 #endif /* CONFIG_X86_32 */
656 
/*
 * RT sigreturn: restore sigmask, register state and the altstack settings
 * from the rt_sigframe built at signal delivery.
 */
657 SYSCALL_DEFINE0(rt_sigreturn)
658 {
659 	struct pt_regs *regs = current_pt_regs();
660 	struct rt_sigframe __user *frame;
661 	sigset_t set;
662 	unsigned long uc_flags;
663 
	/* Back up over the return-address slot the restorer consumed. */
664 	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
665 	if (!access_ok(frame, sizeof(*frame)))
666 		goto badframe;
667 	if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask))
668 		goto badframe;
669 	if (__get_user(uc_flags, &frame->uc.uc_flags))
670 		goto badframe;
671 
672 	set_current_blocked(&set);
673 
674 	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
675 		goto badframe;
676 
677 	if (restore_altstack(&frame->uc.uc_stack))
678 		goto badframe;
679 
680 	return regs->ax;
681 
682 badframe:
683 	signal_fault(regs, frame, "rt_sigreturn");
684 	return 0;
685 }
686 
687 /*
688  * There are four different struct types for signal frame: sigframe_ia32,
689  * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
690  * -- the largest size. It means the size for 64-bit apps is a bit more
691  * than needed, but this keeps the code simple.
692  */
693 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
694 # define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
695 #else
696 # define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
697 #endif
698 
699 /*
700  * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
701  * If a signal frame starts at an unaligned address, extra space is required.
702  * This is the max alignment padding, conservatively.
703  */
704 #define MAX_XSAVE_PADDING	63UL
705 
706 /*
707  * The frame data is composed of the following areas and laid out as:
708  *
709  * -------------------------
710  * | alignment padding     |
711  * -------------------------
712  * | (f)xsave frame        |
713  * -------------------------
714  * | fsave header          |
715  * -------------------------
716  * | alignment padding     |
717  * -------------------------
718  * | siginfo + ucontext    |
719  * -------------------------
720  */
721 
722 /* max_frame_size tells userspace the worst case signal stack size. */
723 static unsigned long __ro_after_init max_frame_size;
724 static unsigned int __ro_after_init fpu_default_state_size;
725 
/*
 * Compute the worst-case signal frame size once at boot: the largest
 * siginfo+ucontext layout plus alignment padding, plus the default FPU
 * state plus worst-case XSAVE alignment, rounded up to FRAME_ALIGNMENT.
 */
726 void __init init_sigframe_size(void)
727 {
728 	fpu_default_state_size = fpu__get_fpstate_size();
729 
730 	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;
731 
732 	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;
733 
734 	/* Userspace expects an aligned size. */
735 	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);
736 
737 	pr_info("max sigframe size: %lu\n", max_frame_size);
738 }
739 
/* Report the boot-computed worst-case signal frame size to callers. */
740 unsigned long get_sigframe_size(void)
741 {
742 	return max_frame_size;
743 }
744 
/* Frame-ABI selectors, keyed off the sa_flags recorded at sigaction time. */

/* 32-bit frame requested by a compat task on a 64-bit kernel? */
745 static inline int is_ia32_compat_frame(struct ksignal *ksig)
746 {
747 	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
748 		ksig->ka.sa.sa_flags & SA_IA32_ABI;
749 }
750 
/* Any 32-bit frame: native 32-bit kernel or compat task. */
751 static inline int is_ia32_frame(struct ksignal *ksig)
752 {
753 	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
754 }
755 
/* x32 frame (64-bit registers, 32-bit pointers)? */
756 static inline int is_x32_frame(struct ksignal *ksig)
757 {
758 	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
759 		ksig->ka.sa.sa_flags & SA_X32_ABI;
760 }
761 
/*
 * Dispatch signal frame construction to the correct ABI variant
 * (ia32 legacy, ia32 RT, x32 or native 64-bit).  Returns the setup
 * function's result: 0 on success, -EFAULT on failure.
 */
762 static int
763 setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
764 {
765 	int usig = ksig->sig;
766 	sigset_t *set = sigmask_to_save();
	/* Compat setups reinterpret the same mask storage as compat_sigset_t. */
767 	compat_sigset_t *cset = (compat_sigset_t *) set;
768 
769 	/* Perform fixup for the pre-signal frame. */
770 	rseq_signal_deliver(ksig, regs);
771 
772 	/* Set up the stack frame */
773 	if (is_ia32_frame(ksig)) {
774 		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
775 			return ia32_setup_rt_frame(usig, ksig, cset, regs);
776 		else
777 			return ia32_setup_frame(usig, ksig, cset, regs);
778 	} else if (is_x32_frame(ksig)) {
779 		return x32_setup_rt_frame(ksig, cset, regs);
780 	} else {
781 		return __setup_rt_frame(ksig->sig, ksig, set, regs);
782 	}
783 }
784 
/*
 * Deliver one signal: fix up an interrupted system call for restart
 * semantics, build the signal frame, then adjust flags/FPU state and
 * notify the signal core (which also handles a failed setup).
 */
785 static void
786 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
787 {
788 	bool stepping, failed;
789 	struct fpu *fpu = &current->thread.fpu;
790 
791 	if (v8086_mode(regs))
792 		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
793 
794 	/* Are we from a system call? */
795 	if (syscall_get_nr(current, regs) != -1) {
796 		/* If so, check system call restarting.. */
797 		switch (syscall_get_error(current, regs)) {
798 		case -ERESTART_RESTARTBLOCK:
799 		case -ERESTARTNOHAND:
800 			regs->ax = -EINTR;
801 			break;
802 
803 		case -ERESTARTSYS:
804 			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
805 				regs->ax = -EINTR;
806 				break;
807 			}
808 			fallthrough;
809 		case -ERESTARTNOINTR:
			/* Re-arm the syscall: restore ax and back ip up over
			 * the 2-byte syscall/int $0x80 instruction. */
810 			regs->ax = regs->orig_ax;
811 			regs->ip -= 2;
812 			break;
813 		}
814 	}
815 
816 	/*
817 	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
818 	 * so that register information in the sigcontext is correct and
819 	 * then notify the tracer before entering the signal handler.
820 	 */
821 	stepping = test_thread_flag(TIF_SINGLESTEP);
822 	if (stepping)
823 		user_disable_single_step(current);
824 
825 	failed = (setup_rt_frame(ksig, regs) < 0);
826 	if (!failed) {
827 		/*
828 		 * Clear the direction flag as per the ABI for function entry.
829 		 *
830 		 * Clear RF when entering the signal handler, because
831 		 * it might disable possible debug exception from the
832 		 * signal handler.
833 		 *
834 		 * Clear TF for the case when it wasn't set by debugger to
835 		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
836 		 */
837 		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
838 		/*
839 		 * Ensure the signal handler starts with the new fpu state.
840 		 */
841 		fpu__clear_user_states(fpu);
842 	}
843 	signal_setup_done(failed, ksig, stepping);
844 }
845 
/*
 * Pick the restart_syscall number matching the ABI of the interrupted
 * syscall: the ia32 number for compat tasks, and on x32 preserve the
 * __X32_SYSCALL_BIT from the original syscall number.
 */
846 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
847 {
848 #ifdef CONFIG_IA32_EMULATION
849 	if (current->restart_block.arch_data & TS_COMPAT)
850 		return __NR_ia32_restart_syscall;
851 #endif
852 #ifdef CONFIG_X86_X32_ABI
853 	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
854 #else
855 	return __NR_restart_syscall;
856 #endif
857 }
858 
859 /*
860  * Note that 'init' is a special process: it doesn't get signals it doesn't
861  * want to handle. Thus you cannot kill init even with a SIGKILL even by
862  * mistake.
863  */
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Entry point from the exit-to-usermode path: deliver a pending signal if
 * there is one, otherwise handle syscall-restart semantics and put the
 * saved sigmask back.
 */
864 void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
865 {
866 	struct ksignal ksig;
867 
868 	if (has_signal && get_signal(&ksig)) {
869 		/* Whee! Actually deliver the signal.  */
870 		handle_signal(&ksig, regs);
871 		return;
872 	}
873 
874 	/* Did we come from a system call? */
875 	if (syscall_get_nr(current, regs) != -1) {
876 		/* Restart the system call - no handlers present */
877 		switch (syscall_get_error(current, regs)) {
878 		case -ERESTARTNOHAND:
879 		case -ERESTARTSYS:
880 		case -ERESTARTNOINTR:
881 			regs->ax = regs->orig_ax;
882 			regs->ip -= 2;
883 			break;
884 
		/* Restart via the dedicated restart_syscall() instead. */
885 		case -ERESTART_RESTARTBLOCK:
886 			regs->ax = get_nr_restart_syscall(regs);
887 			regs->ip -= 2;
888 			break;
889 		}
890 	}
891 
892 	/*
893 	 * If there's no signal to deliver, we just put the saved sigmask
894 	 * back.
895 	 */
896 	restore_saved_sigmask();
897 }
898 
/*
 * Common failure path for sigreturn variants: optionally log the bad
 * frame (rate-limited), then kill the task with SIGSEGV.
 */
899 void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
900 {
901 	struct task_struct *me = current;
902 
903 	if (show_unhandled_signals && printk_ratelimit()) {
		/* Escalate to KERN_EMERG when this is init (pid 1). */
904 		printk("%s"
905 		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
906 		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
907 		       me->comm, me->pid, where, frame,
908 		       regs->ip, regs->sp, regs->orig_ax);
909 		print_vma_addr(KERN_CONT " in ", regs->ip);
910 		pr_cont("\n");
911 	}
912 
913 	force_sig(SIGSEGV);
914 }
915 
916 #ifdef CONFIG_DYNAMIC_SIGFRAME
917 #ifdef CONFIG_STRICT_SIGALTSTACK_SIZE
918 static bool strict_sigaltstack_size __ro_after_init = true;
919 #else
920 static bool strict_sigaltstack_size __ro_after_init = false;
921 #endif
922 
/* Parse the "strict_sas_size=" boot parameter (boolean). */
923 static int __init strict_sas_size(char *arg)
924 {
925 	return kstrtobool(arg, &strict_sigaltstack_size);
926 }
927 __setup("strict_sas_size", strict_sas_size);
928 
929 /*
930  * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512
931  * exceeds that size already. As such programs might never use the
932  * sigaltstack they just continued to work. While always checking against
933  * the real size would be correct, this might be considered a regression.
934  *
935  * Therefore avoid the sanity check, unless enforced by kernel
936  * configuration or command line option.
937  *
938  * When dynamic FPU features are supported, the check is also enforced when
939  * the task has permissions to use dynamic features. Tasks which have no
940  * permission are checked against the size of the non-dynamic feature set
941  * if strict checking is enabled. This avoids forcing all tasks on the
942  * system to allocate large sigaltstacks even if they are never going
943  * to use a dynamic feature. As this is serialized via sighand::siglock
944  * any permission request for a dynamic feature either happened already
945  * or will see the newly install sigaltstack size in the permission checks.
946  */
947 bool sigaltstack_size_valid(size_t ss_size)
948 {
	/* Worst-case frame minus the default FPU area; the task's permitted
	 * FPU user-state size is added back below. */
949 	unsigned long fsize = max_frame_size - fpu_default_state_size;
950 	u64 mask;
951 
952 	lockdep_assert_held(&current->sighand->siglock);
953 
954 	if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
955 		return true;
956 
957 	fsize += current->group_leader->thread.fpu.perm.__user_state_size;
958 	if (likely(ss_size > fsize))
959 		return true;
960 
961 	if (strict_sigaltstack_size)
962 		return ss_size > fsize;
963 
	/* Only enforce the size when dynamic features were permitted. */
964 	mask = current->group_leader->thread.fpu.perm.__state_perm;
965 	if (mask & XFEATURE_MASK_USER_DYNAMIC)
966 		return ss_size > fsize;
967 
968 	return true;
969 }
970 #endif /* CONFIG_DYNAMIC_SIGFRAME */
971 
972 #ifdef CONFIG_X86_X32_ABI
/*
 * x32 rt_sigreturn: like the 64-bit variant but with the x32 frame layout
 * and compat altstack restore.  Only the low 64 bits of the sigmask are
 * read (set.sig[0]).
 */
973 COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
974 {
975 	struct pt_regs *regs = current_pt_regs();
976 	struct rt_sigframe_x32 __user *frame;
977 	sigset_t set;
978 	unsigned long uc_flags;
979 
980 	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
981 
982 	if (!access_ok(frame, sizeof(*frame)))
983 		goto badframe;
984 	if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask))
985 		goto badframe;
986 	if (__get_user(uc_flags, &frame->uc.uc_flags))
987 		goto badframe;
988 
989 	set_current_blocked(&set);
990 
991 	if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
992 		goto badframe;
993 
994 	if (compat_restore_altstack(&frame->uc.uc_stack))
995 		goto badframe;
996 
997 	return regs->ax;
998 
999 badframe:
1000 	signal_fault(regs, frame, "x32 rt_sigreturn");
1001 	return 0;
1002 }
1003 #endif
1004