/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>

#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "ptrace.h"

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

static const unsigned long retcodes[4] = {
        SWI_SYS_SIGRETURN,      SWI_THUMB_SIGRETURN,
        SWI_SYS_RT_SIGRETURN,   SWI_THUMB_RT_SIGRETURN
};

static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
{
        sigset_t saveset;

        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        regs->ARM_r0 = -EINTR;

        while (1) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                if (do_signal(&saveset, regs, 0))
                        return regs->ARM_r0;
        }
}

asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
{
        sigset_t saveset, newset;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        regs->ARM_r0 = -EINTR;

        while (1) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                if (do_signal(&saveset, regs, 0))
                        return regs->ARM_r0;
        }
}

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
              struct old_sigaction __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        if (act) {
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
                        return -EFAULT;
                __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
                        return -EFAULT;
                __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }

        return ret;
}
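
/*
 * iWMMXt coprocessor state, when present, is saved into the auxiliary
 * signal frame below.  The saved area is tagged with two magic words so
 * that sigreturn can validate it before restoring it.
 */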
#ifdef CONFIG_IWMMXT

/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
#define IWMMXT_STORAGE_SIZE	(0x98 + 8)
#define IWMMXT_MAGIC0		0x12ef842a
#define IWMMXT_MAGIC1		0x1c07ca71

struct iwmmxt_sigframe {
        unsigned long magic0;
        unsigned long magic1;
        unsigned long storage[0x98/4];
};

static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
{
        unsigned long addr = (unsigned long)uptr;
        pgd_t *pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pmd_t *pmd = pmd_offset(pgd, addr);
                if (pmd_present(*pmd)) {
                        pte_t *pte = pte_offset_map(pmd, addr);
                        return (pte_present(*pte) && (!wr || pte_write(*pte)));
                }
        }
        return 0;
}
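
/*
 * copy_locked() touches the first and last byte of the user buffer with
 * ordinary (fault-handling) user accesses, then re-checks under
 * mm->page_table_lock that both pages are really present before calling
 * copyfn(), which copies to/from user memory without its own access
 * checks.
 */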
static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
                       void (*copyfn)(void *, void __user *))
{
        unsigned char v, __user *userptr = uptr;
        int err = 0;

        do {
                struct mm_struct *mm;

                if (write) {
                        __put_user_error(0, userptr, err);
                        __put_user_error(0, userptr + size - 1, err);
                } else {
                        __get_user_error(v, userptr, err);
                        __get_user_error(v, userptr + size - 1, err);
                }

                if (err)
                        break;

                mm = current->mm;
                spin_lock(&mm->page_table_lock);
                if (page_present(mm, userptr, write) &&
                    page_present(mm, userptr + size - 1, write)) {
                        copyfn(kptr, uptr);
                } else
                        err = 1;
                spin_unlock(&mm->page_table_lock);
        } while (err);

        return err;
}

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
        int err = 0;

        /* the iWMMXt context must be 64 bit aligned */
        WARN_ON((unsigned long)frame & 7);

        __put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
        __put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);

        /*
         * iwmmxt_task_copy() doesn't check user permissions.
         * Let's do a dummy write on the upper boundary to ensure
         * access to user mem is OK all the way up.
         */
        err |= copy_locked(&frame->storage, current_thread_info(),
                           sizeof(frame->storage), 1, iwmmxt_task_copy);
        return err;
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
        unsigned long magic0, magic1;
        int err = 0;

        /* the iWMMXt context is 64 bit aligned */
        WARN_ON((unsigned long)frame & 7);

        /*
         * Validate iWMMXt context signature.
         * Also, iwmmxt_task_restore() doesn't check user permissions.
         * Let's do a dummy write on the upper boundary to ensure
         * access to user mem is OK all the way up.
         */
        __get_user_error(magic0, &frame->magic0, err);
        __get_user_error(magic1, &frame->magic1, err);
        if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
                err = copy_locked(&frame->storage, current_thread_info(),
                                  sizeof(frame->storage), 0, iwmmxt_task_restore);
        return err;
}

#endif

/*
 * Auxiliary signal frame.  This saves stuff like FP state.
 * The layout of this structure is not part of the user ABI.
 */
struct aux_sigframe {
#ifdef CONFIG_IWMMXT
        struct iwmmxt_sigframe iwmmxt;
#endif
#ifdef CONFIG_VFP
        union vfp_state vfp;
#endif
};

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
        struct sigcontext sc;
        unsigned long extramask[_NSIG_WORDS-1];
        unsigned long retcode;
        struct aux_sigframe aux __attribute__((aligned(8)));
};

struct rt_sigframe {
        struct siginfo __user *pinfo;
        void __user *puc;
        struct siginfo info;
        struct ucontext uc;
        unsigned long retcode;
        struct aux_sigframe aux __attribute__((aligned(8)));
};
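
/*
 * restore_sigcontext() is the inverse of setup_sigcontext() below: it
 * reads the saved register state back from the user frame and fails if
 * the result is not a valid user-mode register set (valid_user_regs),
 * so a forged frame cannot return with a privileged CPSR.
 */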
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
                   struct aux_sigframe __user *aux)
{
        int err = 0;

        __get_user_error(regs->ARM_r0, &sc->arm_r0, err);
        __get_user_error(regs->ARM_r1, &sc->arm_r1, err);
        __get_user_error(regs->ARM_r2, &sc->arm_r2, err);
        __get_user_error(regs->ARM_r3, &sc->arm_r3, err);
        __get_user_error(regs->ARM_r4, &sc->arm_r4, err);
        __get_user_error(regs->ARM_r5, &sc->arm_r5, err);
        __get_user_error(regs->ARM_r6, &sc->arm_r6, err);
        __get_user_error(regs->ARM_r7, &sc->arm_r7, err);
        __get_user_error(regs->ARM_r8, &sc->arm_r8, err);
        __get_user_error(regs->ARM_r9, &sc->arm_r9, err);
        __get_user_error(regs->ARM_r10, &sc->arm_r10, err);
        __get_user_error(regs->ARM_fp, &sc->arm_fp, err);
        __get_user_error(regs->ARM_ip, &sc->arm_ip, err);
        __get_user_error(regs->ARM_sp, &sc->arm_sp, err);
        __get_user_error(regs->ARM_lr, &sc->arm_lr, err);
        __get_user_error(regs->ARM_pc, &sc->arm_pc, err);
        __get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);

        err |= !valid_user_regs(regs);

#ifdef CONFIG_IWMMXT
        if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
                err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//      if (err == 0)
//              err |= vfp_restore_state(&aux->vfp);
#endif

        return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
        struct sigframe __user *frame;
        sigset_t set;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct sigframe __user *)regs->ARM_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.oldmask)
            || (_NSIG_WORDS > 1
                && __copy_from_user(&set.sig[1], &frame->extramask,
                                    sizeof(frame->extramask))))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext(regs, &frame->sc, &frame->aux))
                goto badframe;

        /* Send SIGTRAP if we're single-stepping */
        if (current->ptrace & PT_SINGLESTEP) {
                ptrace_cancel_bpt(current);
                send_sig(SIGTRAP, current, 1);
        }

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;
        sigset_t set;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->ARM_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
                goto badframe;

        if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
                goto badframe;

        /* Send SIGTRAP if we're single-stepping */
        if (current->ptrace & PT_SINGLESTEP) {
                ptrace_cancel_bpt(current);
                send_sig(SIGTRAP, current, 1);
        }

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}
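
/*
 * Save the interrupted register state plus the latest fault information
 * into the user-visible sigcontext.  iWMMXt coprocessor state, when in
 * use, is saved separately into the aux frame.
 */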
static int
setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
                 struct pt_regs *regs, unsigned long mask)
{
        int err = 0;

        __put_user_error(regs->ARM_r0, &sc->arm_r0, err);
        __put_user_error(regs->ARM_r1, &sc->arm_r1, err);
        __put_user_error(regs->ARM_r2, &sc->arm_r2, err);
        __put_user_error(regs->ARM_r3, &sc->arm_r3, err);
        __put_user_error(regs->ARM_r4, &sc->arm_r4, err);
        __put_user_error(regs->ARM_r5, &sc->arm_r5, err);
        __put_user_error(regs->ARM_r6, &sc->arm_r6, err);
        __put_user_error(regs->ARM_r7, &sc->arm_r7, err);
        __put_user_error(regs->ARM_r8, &sc->arm_r8, err);
        __put_user_error(regs->ARM_r9, &sc->arm_r9, err);
        __put_user_error(regs->ARM_r10, &sc->arm_r10, err);
        __put_user_error(regs->ARM_fp, &sc->arm_fp, err);
        __put_user_error(regs->ARM_ip, &sc->arm_ip, err);
        __put_user_error(regs->ARM_sp, &sc->arm_sp, err);
        __put_user_error(regs->ARM_lr, &sc->arm_lr, err);
        __put_user_error(regs->ARM_pc, &sc->arm_pc, err);
        __put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);

        __put_user_error(current->thread.trap_no, &sc->trap_no, err);
        __put_user_error(current->thread.error_code, &sc->error_code, err);
        __put_user_error(current->thread.address, &sc->fault_address, err);
        __put_user_error(mask, &sc->oldmask, err);

#ifdef CONFIG_IWMMXT
        if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
                err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//      if (err == 0)
//              err |= vfp_save_state(&aux->vfp);
#endif

        return err;
}

static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
        unsigned long sp = regs->ARM_sp;
        void __user *frame;

        /*
         * This is the X/Open sanctioned signal stack switching.
         */
        if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
                sp = current->sas_ss_sp + current->sas_ss_size;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = (void __user *)((sp - framesize) & ~7);

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(VERIFY_WRITE, frame, framesize))
                frame = NULL;

        return frame;
}

static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
             unsigned long __user *rc, void __user *frame, int usig)
{
        unsigned long handler = (unsigned long)ka->sa.sa_handler;
        unsigned long retcode;
        int thumb = 0;
        unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;

        /*
         * Maybe we need to deliver a 32-bit signal to a 26-bit task.
         */
        if (ka->sa.sa_flags & SA_THIRTYTWO)
                cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
        if (elf_hwcap & HWCAP_THUMB) {
                /*
                 * The LSB of the handler determines if we're going to
                 * be using THUMB or ARM mode for this signal handler.
                 */
                thumb = handler & 1;

                if (thumb)
                        cpsr |= PSR_T_BIT;
                else
                        cpsr &= ~PSR_T_BIT;
        }
#endif
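
        /*
         * Pick the return trampoline: either the handler's own restorer,
         * or one of the retcodes[] SWI sequences written onto the stack.
         * retcodes[] is indexed as thumb + (SA_SIGINFO ? 2 : 0), i.e.
         * {ARM sigreturn, Thumb sigreturn, ARM rt_sigreturn,
         *  Thumb rt_sigreturn}.
         */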
        if (ka->sa.sa_flags & SA_RESTORER) {
                retcode = (unsigned long)ka->sa.sa_restorer;
        } else {
                unsigned int idx = thumb;

                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 2;

                if (__put_user(retcodes[idx], rc))
                        return 1;

                /*
                 * Ensure that the instruction cache sees
                 * the return code written onto the stack.
                 */
                flush_icache_range((unsigned long)rc,
                                   (unsigned long)(rc + 1));

                retcode = ((unsigned long)rc) + thumb;
        }

        regs->ARM_r0 = usig;
        regs->ARM_sp = (unsigned long)frame;
        regs->ARM_lr = retcode;
        regs->ARM_pc = handler;
        regs->ARM_cpsr = cpsr;

        return 0;
}

static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);

        if (_NSIG_WORDS > 1) {
                err |= __copy_to_user(frame->extramask, &set->sig[1],
                                      sizeof(frame->extramask));
        }

        if (err == 0)
                err = setup_return(regs, ka, &frame->retcode, frame, usig);

        return err;
}

static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
               sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
        stack_t stack;
        int err = 0;

        if (!frame)
                return 1;

        __put_user_error(&frame->info, &frame->pinfo, err);
        __put_user_error(&frame->uc, &frame->puc, err);
        err |= copy_siginfo_to_user(&frame->info, info);

        __put_user_error(0, &frame->uc.uc_flags, err);
        __put_user_error(NULL, &frame->uc.uc_link, err);

        memset(&stack, 0, sizeof(stack));
        stack.ss_sp = (void __user *)current->sas_ss_sp;
        stack.ss_flags = sas_ss_flags(regs->ARM_sp);
        stack.ss_size = current->sas_ss_size;
        err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));

        err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
                                regs, set->sig[0]);
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

        if (err == 0)
                err = setup_return(regs, ka, &frame->retcode, frame, usig);

        if (err == 0) {
                /*
                 * For realtime signals we must also set the second and third
                 * arguments for the signal handler.
                 *  -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
                 */
                regs->ARM_r1 = (unsigned long)&frame->info;
                regs->ARM_r2 = (unsigned long)&frame->uc;
        }

        return err;
}
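
/*
 * Re-issue the interrupted system call: put back the original r0
 * (the first syscall argument, which the return value overwrote) and
 * step the PC back over the SWI instruction (2 bytes in Thumb mode,
 * 4 in ARM mode).
 */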
static inline void restart_syscall(struct pt_regs *regs)
{
        regs->ARM_r0 = regs->ARM_ORIG_r0;
        regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset,
              struct pt_regs * regs, int syscall)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
        int usig = sig;
        int ret;

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                switch (regs->ARM_r0) {
                case -ERESTART_RESTARTBLOCK:
                case -ERESTARTNOHAND:
                        regs->ARM_r0 = -EINTR;
                        break;
                case -ERESTARTSYS:
                        if (!(ka->sa.sa_flags & SA_RESTART)) {
                                regs->ARM_r0 = -EINTR;
                                break;
                        }
                        /* fallthrough */
                case -ERESTARTNOINTR:
                        restart_syscall(regs);
                }
        }

        /*
         * translate the signal
         */
        if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
                usig = thread->exec_domain->signal_invmap[usig];

        /*
         * Set up the stack frame
         */
        if (ka->sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(usig, ka, info, oldset, regs);
        else
                ret = setup_frame(usig, ka, oldset, regs);

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(regs);

        /*
         * Block the signal if we were unsuccessful.
         */
        if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
                spin_lock_irq(&tsk->sighand->siglock);
                sigorsets(&tsk->blocked, &tsk->blocked,
                          &ka->sa.sa_mask);
                sigaddset(&tsk->blocked, sig);
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);
        }

        if (ret == 0)
                return;

        force_sigsegv(sig, tsk);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
        struct k_sigaction ka;
        siginfo_t info;
        int signr;

        /*
         * We want the common case to go fast, which
         * is why we may in certain cases get here from
         * kernel mode. Just return without doing anything
         * if so.
         */
        if (!user_mode(regs))
                return 0;

        if (try_to_freeze(0))
                goto no_signal;

        if (current->ptrace & PT_SINGLESTEP)
                ptrace_cancel_bpt(current);

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                handle_signal(signr, &ka, &info, oldset, regs, syscall);
                if (current->ptrace & PT_SINGLESTEP)
                        ptrace_set_bpt(current);
                return 1;
        }

 no_signal:
        /*
         * No signal to deliver to the process - restart the syscall.
         */
        if (syscall) {
                if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
                        if (thumb_mode(regs)) {
                                regs->ARM_r7 = __NR_restart_syscall;
                                regs->ARM_pc -= 2;
                        } else {
                                u32 __user *usp;

                                regs->ARM_sp -= 12;
                                usp = (u32 __user *)regs->ARM_sp;

                                put_user(regs->ARM_pc, &usp[0]);
                                /* swi __NR_restart_syscall */
                                put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
                                /* ldr pc, [sp], #12 */
                                put_user(0xe49df00c, &usp[2]);

                                flush_icache_range((unsigned long)usp,
                                                   (unsigned long)(usp + 3));

                                regs->ARM_pc = regs->ARM_sp + 4;
                        }
                }
                if (regs->ARM_r0 == -ERESTARTNOHAND ||
                    regs->ARM_r0 == -ERESTARTSYS ||
                    regs->ARM_r0 == -ERESTARTNOINTR) {
                        restart_syscall(regs);
                }
        }
        if (current->ptrace & PT_SINGLESTEP)
                ptrace_set_bpt(current);
        return 0;
}

asmlinkage void
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
        if (thread_flags & _TIF_SIGPENDING)
                do_signal(&current->blocked, regs, syscall);
}