// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/resume_user_mode.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
#include <asm/syscalls.h>

#include "signal.h"

extern const unsigned long sigreturn_codes[17];

static unsigned long signal_return_offset;

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;
        int err = 0;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                kframe->magic = IWMMXT_MAGIC;
                kframe->size = IWMMXT_STORAGE_SIZE;
                iwmmxt_task_copy(current_thread_info(), &kframe->storage);
        } else {
                /*
                 * For bug-compatibility with older kernels, some space
                 * has to be reserved for iWMMXt even if it's not used.
                 * Set the magic and size appropriately so that properly
                 * written userspace can skip it reliably:
                 */
                *kframe = (struct iwmmxt_sigframe) {
                        .magic = DUMMY_MAGIC,
                        .size  = IWMMXT_STORAGE_SIZE,
                };
        }

        err = __copy_to_user(frame, kframe, sizeof(*kframe));

        return err;
}
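
/*
 * Illustrative sketch only (the authoritative definitions live in the
 * uapi signal/ucontext headers): every block that setup_sigframe()
 * below places in uc_regspace starts with a 32-bit magic followed by a
 * 32-bit size that covers the whole block, header included, and a zero
 * end_magic word terminates the area.  Userspace that is handed the
 * ucontext (for instance as the third argument of an SA_SIGINFO
 * handler) can therefore skip records it does not recognise, roughly:
 *
 *      unsigned long *p = (unsigned long *)uc->uc_regspace;
 *      while (p[0] != 0 && p[0] != VFP_MAGIC)
 *              p += p[1] / sizeof(*p);
 *
 * leaving p either at the VFP block or at the terminating zero.
 */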

static int restore_iwmmxt_context(char __user **auxp)
{
        struct iwmmxt_sigframe __user *frame =
                (struct iwmmxt_sigframe __user *)*auxp;
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;

        /*
         * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
         * block is discarded for compatibility with setup_sigframe() if
         * present, but we don't mandate its presence.  If some other
         * magic is here, it's not for us:
         */
        if (!test_thread_flag(TIF_USING_IWMMXT) &&
            kframe->magic != DUMMY_MAGIC)
                return 0;

        if (kframe->size != IWMMXT_STORAGE_SIZE)
                return -1;

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                if (kframe->magic != IWMMXT_MAGIC)
                        return -1;

                iwmmxt_task_restore(current_thread_info(), &kframe->storage);
        }

        *auxp += IWMMXT_STORAGE_SIZE;
        return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
        struct vfp_sigframe kframe;
        int err = 0;

        memset(&kframe, 0, sizeof(kframe));
        kframe.magic = VFP_MAGIC;
        kframe.size = VFP_STORAGE_SIZE;

        err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
        if (err)
                return err;

        return __copy_to_user(frame, &kframe, sizeof(kframe));
}

static int restore_vfp_context(char __user **auxp)
{
        struct vfp_sigframe frame;
        int err;

        err = __copy_from_user(&frame, *auxp, sizeof(frame));
        if (err)
                return err;

        if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
                return -EINVAL;

        *auxp += sizeof(frame);
        return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
 */

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
        struct sigcontext context;
        char __user *aux;
        sigset_t set;
        int err;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
        if (err == 0) {
                regs->ARM_r0 = context.arm_r0;
                regs->ARM_r1 = context.arm_r1;
                regs->ARM_r2 = context.arm_r2;
                regs->ARM_r3 = context.arm_r3;
                regs->ARM_r4 = context.arm_r4;
                regs->ARM_r5 = context.arm_r5;
                regs->ARM_r6 = context.arm_r6;
                regs->ARM_r7 = context.arm_r7;
                regs->ARM_r8 = context.arm_r8;
                regs->ARM_r9 = context.arm_r9;
                regs->ARM_r10 = context.arm_r10;
                regs->ARM_fp = context.arm_fp;
                regs->ARM_ip = context.arm_ip;
                regs->ARM_sp = context.arm_sp;
                regs->ARM_lr = context.arm_lr;
                regs->ARM_pc = context.arm_pc;
                regs->ARM_cpsr = context.arm_cpsr;
        }

        err |= !valid_user_regs(regs);

        aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
        if (err == 0)
                err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= restore_vfp_context(&aux);
#endif

        return err;
}
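
/*
 * Neither sigreturn variant is called by the handler directly:
 * setup_return() below points the handler's LR at a small sigreturn
 * trampoline (on the signal page or on the stack, or at an
 * SA_RESTORER-provided restorer), so returning from the handler issues
 * the matching sigreturn system call while sp still points at the
 * frame built by setup_sigframe().  Returning regs->ARM_r0 here keeps
 * the syscall return path from clobbering the r0 value that
 * restore_sigframe() has just put back.
 */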

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
        struct sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct sigframe __user *)regs->ARM_sp;

        if (!access_ok(frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV);
        return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->ARM_sp;

        if (!access_ok(frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, &frame->sig))
                goto badframe;

        if (restore_altstack(&frame->sig.uc.uc_stack))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV);
        return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
        struct aux_sigframe __user *aux;
        struct sigcontext context;
        int err = 0;

        context = (struct sigcontext) {
                .arm_r0 = regs->ARM_r0,
                .arm_r1 = regs->ARM_r1,
                .arm_r2 = regs->ARM_r2,
                .arm_r3 = regs->ARM_r3,
                .arm_r4 = regs->ARM_r4,
                .arm_r5 = regs->ARM_r5,
                .arm_r6 = regs->ARM_r6,
                .arm_r7 = regs->ARM_r7,
                .arm_r8 = regs->ARM_r8,
                .arm_r9 = regs->ARM_r9,
                .arm_r10 = regs->ARM_r10,
                .arm_fp = regs->ARM_fp,
                .arm_ip = regs->ARM_ip,
                .arm_sp = regs->ARM_sp,
                .arm_lr = regs->ARM_lr,
                .arm_pc = regs->ARM_pc,
                .arm_cpsr = regs->ARM_cpsr,

                .trap_no = current->thread.trap_no,
                .error_code = current->thread.error_code,
                .fault_address = current->thread.address,
                .oldmask = set->sig[0],
        };

        err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

        aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
        if (err == 0)
                err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= preserve_vfp_context(&aux->vfp);
#endif
        err |= __put_user(0, &aux->end_magic);

        return err;
}
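
/*
 * Worked example for the rounding in get_sigframe() below: with the
 * handler stack pointer at 0xbefff6e0 (or the alternate stack top when
 * SA_ONSTACK is in effect, courtesy of sigsp()) and a hypothetical
 * 0x2a4-byte frame, sp - framesize is 0xbefff43c, and masking with ~7
 * places the frame at 0xbefff438, satisfying the 8-byte ATPCS
 * alignment that the sigreturn paths above re-check via "sp & 7".
 */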

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
        unsigned long sp = sigsp(regs->ARM_sp, ksig);
        void __user *frame;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = (void __user *)((sp - framesize) & ~7);

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(frame, framesize))
                frame = NULL;

        return frame;
}
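
/*
 * A note on the sigreturn_codes[] indexing done by setup_return()
 * below: each trampoline variant occupies consecutive 32-bit words in
 * that table, selected as
 *
 *      idx = thumb << 1;               plain sigreturn (ARM or Thumb)
 *      idx += 3;                       rt_sigreturn, when SA_SIGINFO is set
 *      idx = 6 + thumb * 3 (+ 5);      FDPIC variants, which also carry the
 *                                      restorer's function descriptor
 *                                      address as an extra word
 *
 * The resulting return address is either the copy placed on the stack
 * or the matching slot in the per-mm signal page, with bit 0 of the
 * address selecting Thumb state on return.
 */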

static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
             unsigned long __user *rc, void __user *frame)
{
        unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
        unsigned long handler_fdpic_GOT = 0;
        unsigned long retcode;
        unsigned int idx, thumb = 0;
        unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
        bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
                     (current->personality & FDPIC_FUNCPTRS);

        if (fdpic) {
                unsigned long __user *fdpic_func_desc =
                        (unsigned long __user *)handler;
                if (__get_user(handler, &fdpic_func_desc[0]) ||
                    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
                        return 1;
        }

        cpsr |= PSR_ENDSTATE;

        /*
         * Maybe we need to deliver a 32-bit signal to a 26-bit task.
         */
        if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
                cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
        if (elf_hwcap & HWCAP_THUMB) {
                /*
                 * The LSB of the handler determines if we're going to
                 * be using THUMB or ARM mode for this signal handler.
                 */
                thumb = handler & 1;

                /*
                 * Clear the If-Then Thumb-2 execution state.  ARM spec
                 * requires this to be all 000s in ARM mode.  Snapdragon
                 * S4/Krait misbehaves on a Thumb=>ARM signal transition
                 * without this.
                 *
                 * We must do this whenever we are running on a Thumb-2
                 * capable CPU, which includes ARMv6T2.  However, we elect
                 * to always do this to simplify the code; this field is
                 * marked UNK/SBZP for older architectures.
                 */
                cpsr &= ~PSR_IT_MASK;

                if (thumb) {
                        cpsr |= PSR_T_BIT;
                } else
                        cpsr &= ~PSR_T_BIT;
        }
#endif

        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
                retcode = (unsigned long)ksig->ka.sa.sa_restorer;
                if (fdpic) {
                        /*
                         * We need code to load the function descriptor.
                         * That code follows the standard sigreturn code
                         * (6 words), and is made of 3 + 2 words for each
                         * variant. The 4th copied word is the actual FD
                         * address that the assembly code expects.
                         */
                        idx = 6 + thumb * 3;
                        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                                idx += 5;
                        if (__put_user(sigreturn_codes[idx], rc) ||
                            __put_user(sigreturn_codes[idx+1], rc+1) ||
                            __put_user(sigreturn_codes[idx+2], rc+2) ||
                            __put_user(retcode, rc+3))
                                return 1;
                        goto rc_finish;
                }
        } else {
                idx = thumb << 1;
                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        idx += 3;

                /*
                 * Put the sigreturn code on the stack no matter which return
                 * mechanism we use in order to remain ABI compliant
                 */
                if (__put_user(sigreturn_codes[idx], rc) ||
                    __put_user(sigreturn_codes[idx+1], rc+1))
                        return 1;

rc_finish:
#ifdef CONFIG_MMU
                if (cpsr & MODE32_BIT) {
                        struct mm_struct *mm = current->mm;

                        /*
                         * 32-bit code can use the signal return page
                         * except when the MPU has protected the vectors
                         * page from PL0
                         */
                        retcode = mm->context.sigpage + signal_return_offset +
                                  (idx << 2) + thumb;
                } else
#endif
                {
                        /*
                         * Ensure that the instruction cache sees
                         * the return code written onto the stack.
                         */
                        flush_icache_range((unsigned long)rc,
                                           (unsigned long)(rc + 3));

                        retcode = ((unsigned long)rc) + thumb;
                }
        }

        regs->ARM_r0 = ksig->sig;
        regs->ARM_sp = (unsigned long)frame;
        regs->ARM_lr = retcode;
        regs->ARM_pc = handler;
        if (fdpic)
                regs->ARM_r9 = handler_fdpic_GOT;
        regs->ARM_cpsr = cpsr;

        return 0;
}

static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        /*
         * Set uc.uc_flags to a value which sc.trap_no would never have.
         */
        err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);

        err |= setup_sigframe(frame, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->retcode, frame);

        return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        err |= copy_siginfo_to_user(&frame->info, &ksig->info);

        err |= __put_user(0, &frame->sig.uc.uc_flags);
        err |= __put_user(NULL, &frame->sig.uc.uc_link);

        err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
        err |= setup_sigframe(&frame->sig, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->sig.retcode, frame);

        if (err == 0) {
                /*
                 * For realtime signals we must also set the second and third
                 * arguments for the signal handler.
                 *  -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
                 */
                regs->ARM_r1 = (unsigned long)&frame->info;
                regs->ARM_r2 = (unsigned long)&frame->sig.uc;
        }

        return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int ret;

        /*
         * Perform fixup for the pre-signal frame.
         */
        rseq_signal_deliver(ksig, regs);

        /*
         * Set up the stack frame
         */
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(ksig, oldset, regs);
        else
                ret = setup_frame(ksig, oldset, regs);

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(regs);

        signal_setup_done(ret, ksig, 0);
}
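
/*
 * Syscall restart in do_signal() below works by rewinding the PC over
 * the SVC instruction that got us into the kernel: on entry ARM_pc
 * points just past it, so restart_addr is pc - 4 in ARM state and
 * pc - 2 in Thumb state, and r0 is reloaded from ARM_ORIG_r0 so the
 * re-executed SVC sees its original first argument.  restart ends up
 * as 1 for the ordinary -ERESTART* cases and -1 for
 * -ERESTART_RESTARTBLOCK; that value propagates back through
 * do_work_pending() to the entry assembly, which is expected to
 * re-issue the syscall, using restart_syscall for the -1 case.
 */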

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
        unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
        struct ksignal ksig;
        int restart = 0;

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->ARM_pc;
                restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
                retval = regs->ARM_r0;

                /*
                 * Prepare for system call restart.  We do this here so that a
                 * debugger will see the already changed PSW.
                 */
                switch (retval) {
                case -ERESTART_RESTARTBLOCK:
                        restart -= 2;
                        fallthrough;
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        restart++;
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver. When running under ptrace, at this
         * point the debugger may change all our registers ...
         */
        /*
         * Depending on the signal settings we may need to revert the
         * decision to restart the system call. But skip this if a
         * debugger has chosen to restart at a different PC.
         */
        if (get_signal(&ksig)) {
                /* handler */
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        if (retval == -ERESTARTNOHAND ||
                            retval == -ERESTART_RESTARTBLOCK
                            || (retval == -ERESTARTSYS
                                && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
                }
                handle_signal(&ksig, regs);
        } else {
                /* no handler */
                restore_saved_sigmask();
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        regs->ARM_pc = continue_addr;
                        return restart;
                }
        }
        return 0;
}

asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
        /*
         * The assembly code enters us with IRQs off, but it hasn't
         * informed the tracing code of that for efficiency reasons.
         * Update the trace code with the current status.
         */
        trace_hardirqs_off();
        do {
                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
                        schedule();
                } else {
                        if (unlikely(!user_mode(regs)))
                                return 0;
                        local_irq_enable();
                        if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
                                int restart = do_signal(regs, syscall);
                                if (unlikely(restart)) {
                                        /*
                                         * Restart without handlers.
                                         * Deal with it without leaving
                                         * the kernel space.
                                         */
                                        return restart;
                                }
                                syscall = 0;
                        } else if (thread_flags & _TIF_UPROBE) {
                                uprobe_notify_resume(regs);
                        } else {
                                resume_user_mode_work(regs);
                        }
                }
                local_irq_disable();
                thread_flags = read_thread_flags();
        } while (thread_flags & _TIF_WORK_MASK);
        return 0;
}
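
/*
 * get_signal_page() below builds the per-mm signal page that
 * setup_return() may use for the trampoline: the page is first filled
 * with 0xe7fddef1, a permanently-undefined instruction encoding, so a
 * stray jump into it faults, and sigreturn_codes[] is then copied to a
 * 4-byte-aligned random offset.  With 0x200 + (get_random_u16() & 0x7fc)
 * the offset lies in [0x200, 0x9fc], i.e. one of 512 possible slots,
 * and signal_return_offset records the choice for setup_return().
 */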

struct page *get_signal_page(void)
{
        unsigned long ptr;
        unsigned offset;
        struct page *page;
        void *addr;

        page = alloc_pages(GFP_KERNEL, 0);

        if (!page)
                return NULL;

        addr = page_address(page);

        /* Poison the entire page */
        memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
                 PAGE_SIZE / sizeof(u32));

        /* Give the signal return code some randomness */
        offset = 0x200 + (get_random_u16() & 0x7fc);
        signal_return_offset = offset;

        /* Copy signal return handlers into the page */
        memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

        /* Flush out all instructions in this page */
        ptr = (unsigned long)addr;
        flush_icache_range(ptr, ptr + PAGE_SIZE);

        return page;
}

#ifdef CONFIG_DEBUG_RSEQ
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
        rseq_syscall(regs);
}
#endif

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 4);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x0c);
static_assert(offsetof(siginfo_t, si_uid) == 0x10);
static_assert(offsetof(siginfo_t, si_tid) == 0x0c);
static_assert(offsetof(siginfo_t, si_overrun) == 0x10);
static_assert(offsetof(siginfo_t, si_status) == 0x14);
static_assert(offsetof(siginfo_t, si_utime) == 0x18);
static_assert(offsetof(siginfo_t, si_stime) == 0x1c);
static_assert(offsetof(siginfo_t, si_value) == 0x14);
static_assert(offsetof(siginfo_t, si_int) == 0x14);
static_assert(offsetof(siginfo_t, si_ptr) == 0x14);
static_assert(offsetof(siginfo_t, si_addr) == 0x0c);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x10);
static_assert(offsetof(siginfo_t, si_lower) == 0x14);
static_assert(offsetof(siginfo_t, si_upper) == 0x18);
static_assert(offsetof(siginfo_t, si_pkey) == 0x14);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x10);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x14);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18);
static_assert(offsetof(siginfo_t, si_band) == 0x0c);
static_assert(offsetof(siginfo_t, si_fd) == 0x10);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x0c);
static_assert(offsetof(siginfo_t, si_syscall) == 0x10);
static_assert(offsetof(siginfo_t, si_arch) == 0x14);