/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

const unsigned long sigreturn_codes[7] = {
	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = CRUNCH_MAGIC;
	kframe->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	kframe->magic = IWMMXT_MAGIC;
	kframe->size = IWMMXT_STORAGE_SIZE;
	iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	if (kframe->magic != IWMMXT_MAGIC ||
	    kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;
	iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	const unsigned long magic = VFP_MAGIC;
	const unsigned long size = VFP_STORAGE_SIZE;
	int err = 0;

	__put_user_error(magic, &frame->magic, err);
	__put_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;

	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);

	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
	struct ucontext uc;
	unsigned long retcode[2];
};

struct rt_sigframe {
	struct siginfo info;
	struct sigframe sig;
};

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	int err = 0;

	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	__put_user_error(0, &aux->end_magic, err);

	return err;
}

static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
{
	unsigned long sp = regs->ARM_sp;
	void __user *frame;

	/*
	 * This is the X/Open sanctioned signal stack switching.
	 */
	if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
		sp = current->sas_ss_sp + current->sas_ss_size;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(VERIFY_WRITE, frame, framesize))
		frame = NULL;

	return frame;
}

static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb) {
			cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
			/* clear the If-Then Thumb-2 execution state */
			cpsr &= ~PSR_IT_MASK;
#endif
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		unsigned int idx = thumb << 1;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

		if (cpsr & MODE32_BIT) {
			/*
			 * 32-bit code can use the new high-page
			 * signal return code support.
			 */
			retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
		} else {
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 2));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}

static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->retcode, frame, usig);

	return err;
}

static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->sig.uc.uc_flags, err);
	__put_user_error(NULL, &frame->sig.uc.uc_link, err);

	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ka, frame->sig.retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	sigset_t *oldset = sigmask_to_save();
	int usig = sig;
	int ret;

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	if (ret != 0) {
		force_sigsegv(sig, tsk);
		return;
	}
	signal_delivered(sig, info, ka, regs, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct k_sigaction ka;
	siginfo_t info;
	int signr;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (regs->ARM_pc != restart_addr)
		restart = 0;
	if (signr > 0) {
		if (unlikely(restart)) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}

		handle_signal(signr, &ka, &info, regs);
		return 0;
	}

	restore_saved_sigmask();
	if (unlikely(restart))
		regs->ARM_pc = continue_addr;
	return restart;
}

asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}