/*
 * linux/arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

extern const unsigned long sigreturn_codes[17];

static unsigned long signal_return_offset;

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(char __user **auxp)
{
	struct crunch_sigframe __user *frame =
		(struct crunch_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	*auxp += CRUNCH_STORAGE_SIZE;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif
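/*
 * A note on the kbuf/kframe pattern used above and in the iWMMXt
 * helpers below: the on-stack buffer is over-allocated by 8 bytes so
 * that "(kbuf + 8) & ~7" can always carve a 64-bit aligned frame
 * pointer out of it, whatever the natural alignment of kbuf itself
 * happens to be.  The coprocessor state is staged through this aligned
 * kernel copy rather than being accessed piecemeal in userspace.
 */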
#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;
	int err = 0;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		kframe->magic = IWMMXT_MAGIC;
		kframe->size = IWMMXT_STORAGE_SIZE;
		iwmmxt_task_copy(current_thread_info(), &kframe->storage);

		err = __copy_to_user(frame, kframe, sizeof(*frame));
	} else {
		/*
		 * For bug-compatibility with older kernels, some space
		 * has to be reserved for iWMMXt even if it's not used.
		 * Set the magic and size appropriately so that properly
		 * written userspace can skip it reliably:
		 */
		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
	}

	return err;
}

static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence.  If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;

		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(char __user **auxp)
{
	struct vfp_sigframe frame;
	int err;

	err = __copy_from_user(&frame, *auxp, sizeof(frame));
	if (err)
		return err;

	if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
		return -EINVAL;

	*auxp += sizeof(frame);
	return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
 */

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct sigcontext context;
	char __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
	if (err == 0) {
		regs->ARM_r0 = context.arm_r0;
		regs->ARM_r1 = context.arm_r1;
		regs->ARM_r2 = context.arm_r2;
		regs->ARM_r3 = context.arm_r3;
		regs->ARM_r4 = context.arm_r4;
		regs->ARM_r5 = context.arm_r5;
		regs->ARM_r6 = context.arm_r6;
		regs->ARM_r7 = context.arm_r7;
		regs->ARM_r8 = context.arm_r8;
		regs->ARM_r9 = context.arm_r9;
		regs->ARM_r10 = context.arm_r10;
		regs->ARM_fp = context.arm_fp;
		regs->ARM_ip = context.arm_ip;
		regs->ARM_sp = context.arm_sp;
		regs->ARM_lr = context.arm_lr;
		regs->ARM_pc = context.arm_pc;
		regs->ARM_cpsr = context.arm_cpsr;
	}

	err |= !valid_user_regs(regs);

	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}
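/*
 * The uc_regspace area walked above is a sequence of variable-sized
 * records, each tagged with a magic/size pair; that is why every
 * restore_*_context() helper takes a char __user ** and advances it
 * past the record it consumed, after validating the tag.
 */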
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}
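/*
 * setup_sigframe() above is the mirror image of restore_sigframe():
 * the register dump goes out one word at a time via __put_user_error(),
 * and the coprocessor records appended to uc_regspace are terminated by
 * a zero end_magic word so their end can be found again at restore time.
 */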
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long handler_fdpic_GOT = 0;
	unsigned long retcode;
	unsigned int idx, thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
		     (current->personality & FDPIC_FUNCPTRS);

	if (fdpic) {
		unsigned long __user *fdpic_func_desc =
					(unsigned long __user *)handler;
		if (__get_user(handler, &fdpic_func_desc[0]) ||
		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
			return 1;
	}

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		/*
		 * Clear the If-Then Thumb-2 execution state.  ARM spec
		 * requires this to be all 000s in ARM mode.  Snapdragon
		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
		 * without this.
		 *
		 * We must do this whenever we are running on a Thumb-2
		 * capable CPU, which includes ARMv6T2.  However, we elect
		 * to always do this to simplify the code; this field is
		 * marked UNK/SBZP for older architectures.
		 */
		cpsr &= ~PSR_IT_MASK;

		if (thumb) {
			cpsr |= PSR_T_BIT;
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
		if (fdpic) {
			/*
			 * We need code to load the function descriptor.
			 * That code follows the standard sigreturn code
			 * (6 words), and is made of 3 + 2 words for each
			 * variant. The 4th copied word is the actual FD
			 * address that the assembly code expects.
			 */
			idx = 6 + thumb * 3;
			if (ksig->ka.sa.sa_flags & SA_SIGINFO)
				idx += 5;
			if (__put_user(sigreturn_codes[idx],   rc  ) ||
			    __put_user(sigreturn_codes[idx+1], rc+1) ||
			    __put_user(sigreturn_codes[idx+2], rc+2) ||
			    __put_user(retcode,                rc+3))
				return 1;
			goto rc_finish;
		}
	} else {
		idx = thumb << 1;
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
		if (__put_user(sigreturn_codes[idx], rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

rc_finish:
#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;

			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 3));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = ksig->sig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	if (fdpic)
		regs->ARM_r9 = handler_fdpic_GOT;
	regs->ARM_cpsr = cpsr;

	return 0;
}
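/*
 * On the sigreturn_codes[] indexing in setup_return() above:
 * idx = thumb << 1 selects the ARM or Thumb trampoline for plain
 * sigreturn, idx += 3 moves to the rt_sigreturn variant, and the FDPIC
 * descriptor-loading stubs follow from index 6 (see the 17-word array
 * declared at the top of this file).  Adding 'thumb' to the final
 * retcode address sets its low bit, which is how the return branch
 * re-enters Thumb state.
 */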
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Increment event counter and perform fixup for the pre-signal
	 * frame.
	 */
	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
			/* fall through */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}
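/*
 * The restart value computed above becomes do_signal()'s return value:
 * 0 means carry on to userspace as normal, a positive value asks for
 * the interrupted system call to be re-issued, and the negative value
 * left by -ERESTART_RESTARTBLOCK indicates that restart_syscall()
 * should run instead.  The actual re-dispatch is done by the entry
 * assembly that consumes do_work_pending()'s return value, not by
 * anything in this file.
 */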
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
				rseq_handle_notify_resume(NULL, regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}

struct page *get_signal_page(void)
{
	unsigned long ptr;
	unsigned offset;
	struct page *page;
	void *addr;

	page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return NULL;

	addr = page_address(page);

	/* Give the signal return code some randomness */
	offset = 0x200 + (get_random_int() & 0x7fc);
	signal_return_offset = offset;

	/*
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

	ptr = (unsigned long)addr + offset;
	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

	return page;
}

/* Defer to generic check */
asmlinkage void addr_limit_check_failed(void)
{
	addr_limit_user_check();
}

#ifdef CONFIG_DEBUG_RSEQ
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
	rseq_syscall(regs);
}
#endif