// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128 bits.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_64K
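/*
 * For orientation, a sketch of the frame this file builds on the user
 * stack.  Which records are actually present depends on configuration
 * and on the task's state, so treat this as illustrative only:
 *
 *	struct rt_sigframe {
 *		struct siginfo info;
 *		struct ucontext uc;
 *			uc.uc_mcontext.__reserved[]:
 *				fpsimd_context	(FPSIMD_MAGIC)
 *				esr_context	(ESR_MAGIC, if fault info valid)
 *				sve_context	(SVE_MAGIC, if SVE in use)
 *				extra_context	(EXTRA_MAGIC, only if the
 *						 records overflow __reserved[])
 *				_aarch64_ctx	(magic == 0: terminator)
 *	};
 *	[any extra data named by extra_context.datap sits immediately
 *	 after the rt_sigframe, still inside the allocated frame]
 */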
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	clear_thread_flag(TIF_SVE);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
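/*
 * A note on the error-handling convention used above and throughout this
 * file: __put_user_error()/__get_user_error() accumulate any fault into
 * err instead of returning it, so a run of user accesses needs only one
 * check at the end.  A minimal sketch of the pattern:
 *
 *	int err = 0;
 *
 *	__put_user_error(val_a, &ctx->a, err);
 *	__put_user_error(val_b, &ctx->b, err);
 *	return err ? -EFAULT : 0;
 */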
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = current->thread.sve_vl;
	unsigned int vq = 0;

	if (test_thread_flag(TIF_SVE))
		vq = sve_vq_from_vl(vl);

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	if (sve.vl != current->thread.sve_vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current);
	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SVE);

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SVE */
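/*
 * The extern declarations in the #else branch above deliberately have no
 * definition anywhere: when CONFIG_ARM64_SVE is disabled,
 * system_supports_sve() is compile-time false, so all callers below are
 * optimised out.  Any call the compiler cannot prove dead would survive
 * to link time and fail there, rather than silently misbehaving.
 */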
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve())
				goto invalid;

			if (user->sve)
				goto invalid;

			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
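/*
 * parse_user_sigframe() above is the kernel-side dual of the walk a
 * userspace signal handler performs over uc_mcontext.__reserved[].  As an
 * illustrative userspace sketch only (a real handler should also chase
 * any extra_context record, as the kernel does above):
 *
 *	struct _aarch64_ctx *head =
 *		(struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
 *
 *	while (head->magic) {
 *		if (head->magic == FPSIMD_MAGIC)
 *			return (struct fpsimd_context *)head;
 *		head = (struct _aarch64_ctx *)((char *)head + head->size);
 *	}
 */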
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve) {
			if (!system_supports_sve())
				return -EINVAL;

			err = restore_sve_fpsimd_context(&user);
		} else {
			err = restore_fpsimd_context(user.fpsimd);
		}
	}

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
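/*
 * sys_rt_sigreturn() is normally reached via the vDSO sigreturn
 * trampoline that setup_return() below installs in the link register;
 * in effect the trampoline is just:
 *
 *	mov	x8, #__NR_rt_sigreturn
 *	svc	#0
 *
 * Applications that register their own restorer with SA_RESTORER arrive
 * here through that instead.
 */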
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	err = sigframe_alloc(user, &user->fpsimd_offset,
			     sizeof(struct fpsimd_context));
	if (err)
		return err;

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE)) {
			int vl = sve_max_vl;

			if (!add_all)
				vl = current->thread.sve_vl;

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
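/*
 * Keeping layout (above) and population (below) separate means the frame
 * size is fully known before anything is written.  As a rough sketch,
 * adding support for a hypothetical new optional record "foo" would
 * involve:
 *
 *	1. adding a foo_offset field to struct rt_sigframe_user_layout;
 *	2. sigframe_alloc(user, &user->foo_offset, sizeof(...)) in
 *	   setup_sigframe_layout();
 *	3. writing the record at apply_user_offset(user, user->foo_offset)
 *	   in setup_sigframe();
 *	4. recognising its magic in parse_user_sigframe() for sigreturn.
 */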
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address,
			 &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state, if present */
	if (system_supports_sve() && err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
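/*
 * The stack prepared by get_sigframe() looks roughly like this, from
 * higher to lower addresses:
 *
 *	sp_top ->	original stack (or top of the alternate stack)
 *			struct frame_record { fp, lr }	<- new regs[29]
 *			struct rt_sigframe, including any extra data
 *	sp ->		base of the signal frame (16-byte aligned)
 *
 * so an unwinder stepping through the handler sees a frame record that
 * links back to the interrupted context.
 */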
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
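/*
 * To summarise setup_return() and setup_rt_frame(): when the handler is
 * entered, the register state is
 *
 *	x0 = signal number
 *	x1 = &frame->info	(SA_SIGINFO handlers only)
 *	x2 = &frame->uc		(SA_SIGINFO handlers only)
 *	x29 = &next_frame->fp	(for unwinding)
 *	x30 = sigtramp		(vDSO trampoline unless SA_RESTORER)
 *	sp = base of the signal frame
 *	pc = sa_handler
 */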
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we came from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call.  As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}
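/*
 * Worked example of the restart arithmetic in do_signal(): an AArch64
 * syscall is a 4-byte SVC instruction (2 bytes in compat Thumb mode), so
 * restart_addr = pc - 4 points back at the SVC itself.  Restoring x0 from
 * orig_x0 (the first syscall argument, which the return value overwrote)
 * and rewinding the PC makes the task re-issue the same system call on
 * its next return to userspace.
 */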
static bool cpu_affinity_invalid(struct pt_regs *regs)
{
	if (!compat_user_mode(regs))
		return false;

	/*
	 * We're preemptible, but a reschedule will cause us to check the
	 * affinity again.
	 */
	return !cpumask_test_cpu(raw_smp_processor_id(),
				 system_32bit_el0_cpumask());
}

asmlinkage void do_notify_resume(struct pt_regs *regs,
				 unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME) {
				tracehook_notify_resume(regs);
				rseq_handle_notify_resume(NULL, regs);

				/*
				 * If we reschedule after checking the affinity
				 * then we must ensure that TIF_NOTIFY_RESUME
				 * is set so that we check the affinity again.
				 * Since tracehook_notify_resume() clears the
				 * flag, ensure that the compiler doesn't move
				 * it after the affinity check.
				 */
				barrier();

				if (cpu_affinity_invalid(regs))
					force_sig(SIGKILL);
			}

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = READ_ONCE(current_thread_info()->flags);
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged: the value
	 * computed below won't be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
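/*
 * For reference, userspace consumes signal_minsigstksz via the aux
 * vector.  A sketch of sizing an alternate signal stack with it
 * (illustrative only):
 *
 *	size_t minsz = getauxval(AT_MINSIGSTKSZ);
 *	stack_t ss = {
 *		.ss_sp = malloc(minsz > SIGSTKSZ ? minsz : SIGSTKSZ),
 *		.ss_size = minsz > SIGSTKSZ ? minsz : SIGSTKSZ,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */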