// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K
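
/*
 * Illustrative sketch (not part of the kernel build): with the uapi
 * definitions sizeof(struct _aarch64_ctx) == 8 and
 * sizeof(struct extra_context) == 32, the reservations above work out
 * to TERMINATOR_SIZE == 16 and EXTRA_CONTEXT_SIZE == 32.  Assuming the
 * usual 4096-byte __reserved[] area in struct sigcontext,
 * init_user_layout() therefore leaves 4096 - 16 - 32 = 4048 bytes for
 * optional records before an extra_context record must be emitted:
 *
 *	struct rt_sigframe_user_layout user;
 *
 *	init_user_layout(&user);
 *	// user.limit - user.size == 4048 under the assumptions above
 */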

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
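
/*
 * Illustrative userspace sketch (not part of the kernel build): a
 * handler installed with SA_SIGINFO can locate the fpsimd_context
 * written by preserve_fpsimd_context() by walking the records in
 * uc_mcontext.__reserved[].  The record types and magics come from
 * <asm/sigcontext.h>; the handler itself is hypothetical.
 *
 *	static void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *		struct _aarch64_ctx *head =
 *			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *		while (head->magic) {
 *			if (head->magic == FPSIMD_MAGIC) {
 *				struct fpsimd_context *ctx = (void *)head;
 *				// ctx->vregs[], ctx->fpsr, ctx->fpcr
 *				break;
 *			}
 *			head = (void *)head + head->size;
 *		}
 *	}
 */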


#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (test_thread_flag(TIF_SVE)) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
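
/*
 * Illustrative sketch (not part of the kernel build): the SVE record
 * sizes above are all derived from the vector length.  One quadword
 * (VQ) is 128 bits, so sve_vq_from_vl() is simply vl / 16.  For
 * example, a task running with a 256-bit vector length reports
 * vl == 32, vq == 2, and its Z-register payload alone accounts for
 * 32 * vl == 1024 bytes of SVE_SIG_REGS_SIZE(vq):
 *
 *	unsigned int vl = 32;			// bytes per Z register
 *	unsigned int vq = sve_vq_from_vl(vl);	// == 2
 */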

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		current->thread.tpidr2_el0 = tpidr2_el0;

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}
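
/*
 * Illustrative sketch (not part of the kernel build): ZA is a square
 * SVL x SVL byte array, so the payload preserved above grows
 * quadratically with the streaming vector length.  Assuming the uapi
 * definition ZA_SIG_REGS_SIZE(vq) == (vq * 16) * (vq * 16), a task
 * with a 512-bit streaming VL (vl == 64, vq == 4) dumps
 * 64 * 64 == 4096 bytes of ZA data, which cannot fit in __reserved[]
 * alongside the other records and so forces an extra_context record:
 *
 *	unsigned int vq = sve_vq_from_vl(64);	// == 4
 *	size_t za_bytes = ZA_SIG_REGS_SIZE(vq);	// == 4096
 */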

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
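
/*
 * Illustrative sketch (not part of the kernel build): ZT0 is a single
 * 512-bit (64-byte) register, so the ZT payload above is
 * ZT_SIG_REGS_SIZE(1) == 64 bytes.  Userspace can check for SME2
 * before trusting ZT records, e.g. (hypothetical helper; standard
 * getauxval()/HWCAP names assumed):
 *
 *	#include <sys/auxv.h>
 *
 *	static int have_sme2(void)
 *	{
 *		return !!(getauxval(AT_HWCAP2) & HWCAP2_SME2);
 *	}
 */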

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
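
/*
 * Illustrative userspace sketch (not part of the kernel build): when a
 * record walk like the one sketched earlier meets EXTRA_MAGIC, the
 * remaining records live in the out-of-line block named by
 * extra_context.datap rather than in __reserved[] itself.  The
 * continuation below is hypothetical but uses only uapi names:
 *
 *	if (head->magic == EXTRA_MAGIC) {
 *		struct extra_context *extra = (struct extra_context *)head;
 *
 *		// resume walking at the out-of-line block
 *		head = (struct _aarch64_ctx *)(uintptr_t)extra->datap;
 *		continue;
 *	}
 */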

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
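
/*
 * Illustrative sketch (not part of the kernel build): userspace does
 * not normally invoke rt_sigreturn() itself.  When the handler
 * returns, it returns to the vDSO signal trampoline
 * (__kernel_rt_sigreturn) installed by setup_return() below, which is
 * conceptually just:
 *
 *	mov	x8, #__NR_rt_sigreturn
 *	svc	#0
 */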

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
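
/*
 * Illustrative sketch (not part of the kernel build): get_sigframe()
 * produces a stack that looks like this, growing downwards from the
 * pre-signal (or alternate) stack pointer:
 *
 *	sp_top ->	original stack contents
 *			frame_record { fp, lr }	<- x29 (FP) on handler entry
 *			rt_sigframe (siginfo + ucontext + records)
 *	sp ->		16-byte aligned base	<- sp on handler entry
 */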

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
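
/*
 * Illustrative sketch (not part of the kernel build): because
 * setup_return() above enters the handler with PSTATE.BTYPE set as if
 * it had been reached via BLR, a handler living in a PROT_BTI guarded
 * page must begin with a landing pad, e.g. (hypothetical handler
 * symbol):
 *
 *	my_handler:
 *		bti	c
 *		...
 */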

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we came from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call.  As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;
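
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * value computed by minsigstksz_setup() below reaches userspace as
 * AT_MINSIGSTKSZ, which should be preferred over the fixed MINSIGSTKSZ
 * when sizing an alternate signal stack:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <sys/auxv.h>
 *
 *	size_t min = getauxval(AT_MINSIGSTKSZ);
 *	stack_t ss = {
 *		.ss_size = min > SIGSTKSZ ? min : SIGSTKSZ,
 *	};
 *	ss.ss_sp = malloc(ss.ss_size);
 *	sigaltstack(&ss, NULL);
 */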

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets.  Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);