xref: /openbmc/linux/arch/arm64/kernel/signal.c (revision 133f9794)
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128 bits.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};
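
/*
 * Illustrative layout of the frame described above (which records are
 * present, and their sizes, vary at run time; this sketch is not ABI):
 *
 *	sigframe:	siginfo, then ucontext, whose __reserved[] array
 *			holds an fpsimd_context plus optional esr_context,
 *			sve_context and extra_context records, ending with
 *			a terminator record (magic == 0, size == 0)
 *	next_frame:	frame_record { fp, lr } for the unwinder
 *
 * The *_offset fields locate each record relative to the sigframe base.
 */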

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	/* Reserve space for the terminator and the extra_context record: */
	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_64K
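
/*
 * For scale: the standard __reserved[] area in struct sigcontext is
 * 4096 bytes, so records only spill into an extra_context area when
 * their combined size exceeds that; even then the total frame is
 * capped by SIGFRAME_MAXSZ.
 */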

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
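
/*
 * Worked example (illustrative): if a record is requested that no longer
 * fits below user->limit, the branch above first allocates an
 * extra_context record within the standard area, re-reserves room there
 * for the standard terminator, then raises the limit towards
 * SIGFRAME_MAXSZ; the oversized record is subsequently placed past the
 * standard frame, in the extra area that extra_context.datap will point
 * at when the frame is written out.
 */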

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.fpsimd_state.user_fpsimd;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	clear_thread_flag(TIF_SVE);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = current->thread.sve_vl;
	unsigned int vq = 0;

	if (test_thread_flag(TIF_SVE))
		vq = sve_vq_from_vl(vl);

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling preserve_fpsimd_context().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
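
/*
 * Sketch of the record written above (see <uapi/asm/sigcontext.h> for
 * the authoritative layout): an { SVE_MAGIC, size } header, then vl and
 * __reserved[], then, when vq != 0, the Z0-Z31, P0-P15 and FFR register
 * data starting at SVE_SIG_REGS_OFFSET.
 */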

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	if (sve.vl != current->thread.sve_vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	barrier();
	/* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */

	set_thread_flag(TIF_FOREIGN_FPSTATE);
	barrier();
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current);
	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SVE);

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

/*
 * Turn any attempt to use these that is not optimised out into a link
 * error:
 */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SVE */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (user->fpsimd)
				goto invalid;

			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve())
				goto invalid;

			if (user->sve)
				goto invalid;

			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(VERIFY_READ, base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
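
/*
 * For reference, a minimal (non-kernel) sketch of the walk validated
 * above, as userspace might perform it from a signal handler; this
 * assumes the <asm/sigcontext.h> definitions and a ucontext_t *uc:
 *
 *	struct _aarch64_ctx *head =
 *		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *	while (head->magic) {
 *		if (head->magic == EXTRA_MAGIC)
 *			head = (struct _aarch64_ctx *)(unsigned long)
 *				((struct extra_context *)head)->datap;
 *		else
 *			head = (void *)head + head->size;
 *	}
 *
 * A zero magic terminates the walk; EXTRA_MAGIC redirects it to the
 * extra area, which ends with its own terminator record.
 */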

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve) {
			if (!system_supports_sve())
				return -EINVAL;

			err = restore_sve_fpsimd_context(&user);
		} else {
			err = restore_fpsimd_context(user.fpsimd);
		}
	}

	return err;
}

asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should be
	 * 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	if (show_unhandled_signals)
		pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
				    current->comm, task_pid_nr(current), __func__,
				    regs->pc, regs->sp);
	force_sig(SIGSEGV, current);
	return 0;
}

/* Determine the layout of optional records in the signal frame */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
{
	int err;

	err = sigframe_alloc(user, &user->fpsimd_offset,
			     sizeof(struct fpsimd_context));
	if (err)
		return err;

	/* fault information, if valid */
	if (current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve()) {
		unsigned int vq = 0;

		if (test_thread_flag(TIF_SVE))
			vq = sve_vq_from_vl(current->thread.sve_vl);

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state, if present */
	if (system_supports_sve() && err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}
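
/*
 * Sketch of the state the handler is entered with when the above
 * succeeds (as arranged by setup_sigframe() and setup_return()):
 *
 *	x0 = usig
 *	x1 = &frame->info, x2 = &frame->uc	(SA_SIGINFO handlers only)
 *	sp = frame, x29 = &next_frame->fp,
 *	x30 = sigtramp, pc = sa_handler
 */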

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/*
	 * Fast forward the stepping logic so we step into the signal
	 * handler.
	 */
	if (!ret)
		user_fastforward_single_step(tsk);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL, even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (in_syscall(regs)) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			regs->regs[0] = -EINTR;
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (in_syscall(regs) && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

asmlinkage void do_notify_resume(struct pt_regs *regs,
				 unsigned int thread_flags)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();

	do {
		/* Check valid user FS if needed */
		addr_limit_user_check();

		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_SIGPENDING)
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME) {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = READ_ONCE(current_thread_info()->flags);
	} while (thread_flags & _TIF_WORK_MASK);
}
942