/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Helper routines
 */
static int protected_save_fp_context(struct sigcontext __user *sc)
{
	int err;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(1);
		err = save_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &sc->sc_fpregs[0]) |
			__put_user(0, &sc->sc_fpregs[31]) |
			__put_user(0, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

static int protected_restore_fp_context(struct sigcontext __user *sc)
{
	int err, tmp __maybe_unused;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(0);
		err = restore_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &sc->sc_fpregs[0]) |
			__get_user(tmp, &sc->sc_fpregs[31]) |
			__get_user(tmp, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

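/*
 * Save the interrupted context -- GPRs and HI/LO from pt_regs, plus the
 * DSP accumulators and FPU state where present -- into the user-space
 * sigcontext.
 */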
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	used_math = !!used_math();
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context. Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc);
	}
	return err;
}

int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
	int err, sig;

	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
	if (err > 0)
		err = 0;
	err |= protected_restore_fp_context(sc);
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU.  Give it up. */
		lose_fpu(0);
	}

	return err;
}

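/*
 * Decide where the signal frame goes: by default just below the user
 * stack pointer, honouring SA_ONSTACK if an alternate signal stack is
 * configured and not already in use.
 */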
void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16 byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	/* This is the X/Open sanctioned signal stack switching.  */
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

	return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *uset;

	uset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, uset, sizeof(sigset_t)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif

asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *unewset;
	size_t sigsetsize;

	/* XXX Don't preclude handling different sized sigset_t's.  */
	sigsetsize = regs.regs[5];
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	unewset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}

#ifdef CONFIG_TRAD_SIGNALS
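/*
 * Traditional sigaction(): copy the old-style struct sigaction between
 * user space and a struct k_sigaction around do_sigaction().
 */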
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
	const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
	stack_t __user *uoss = (stack_t __user *) regs.regs[5];
	unsigned long usp = regs.regs[29];

	return do_sigaltstack(uss, uoss, usp);
}

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

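/*
 * Unwind the rt signal frame built by setup_rt_frame(): restore the
 * signal mask, the saved register context and the alternate stack
 * settings, then return to user space via syscall_exit.
 */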
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
		       struct pt_regs *regs, int signr, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
#endif

static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
			  struct pt_regs *regs, int signr, sigset_t *set,
			  siginfo_t *info)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			  &frame->rs_uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[29]),
			  &frame->rs_uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size,
			  &frame->rs_uc.uc_stack.ss_size);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}

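/*
 * The signal trampolines themselves live in the VDSO page; this ABI
 * descriptor records their offsets so that handle_signal() can point
 * $31 at the proper return code.
 */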
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame	= setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall
};

static int handle_signal(unsigned long sig, siginfo_t *info,
	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	if (regs->regs[0]) {
		switch(regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
		/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	if (sig_uses_siginfo(ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ka, regs, sig, oldset, info);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset,
				       ka, regs, sig, oldset);

	if (ret)
		return ret;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	sigset_t *oldset;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}

		return;
	}

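	/*
	 * No handler was invoked.  If we interrupted a restartable system
	 * call, rewind the EPC over the syscall instruction (substituting
	 * restart_syscall for ERESTART_RESTARTBLOCK) so that it gets
	 * reissued when we return to user space.
	 */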
	if (regs->regs[0]) {
		if (regs->regs[2] == ERESTARTNOHAND ||
		    regs->regs[2] == ERESTARTSYS ||
		    regs->regs[2] == ERESTARTNOINTR) {
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	/* deal with pending signal delivery */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static int signal_setup(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif

	return 0;
}

arch_initcall(signal_setup);