/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
{
	int i;
	int err = 0;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &sc->sc_fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);

	return err;
}

static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
{
	int i;
	int err = 0;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);

	return err;
}

/*
 * These functions will save only the upper 64 bits of the vector registers,
 * since the lower 64 bits have already been saved as the scalar FP context.
 */
static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
{
	int i;
	int err = 0;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
			       &sc->sc_msaregs[i]);
	}
	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);

	return err;
}

static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
{
	int i;
	int err = 0;
	u64 val;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(val, &sc->sc_msaregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
	}
	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);

	return err;
}

/*
 * Helper routines
 */
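/*
 * Save the FP (and, if live, MSA) state to the user sigcontext.  When the
 * current task owns the FPU the registers are dumped directly by
 * save_fp_context()/_save_msa_context(); otherwise the copy is made from the
 * thread's saved context.  A fault on the sigcontext is handled by touching
 * the relevant words with __put_user() and retrying, so the low-level save
 * path itself does not have to deal with an unmapped user stack.
 */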
static int protected_save_fp_context(struct sigcontext __user *sc,
				     unsigned used_math)
{
	int err;
	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
#ifndef CONFIG_EVA
	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			if (save_msa && !err)
				err = _save_msa_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
			if (save_msa && !err)
				err = copy_msa_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &sc->sc_fpregs[0]) |
			__put_user(0, &sc->sc_fpregs[31]) |
			__put_user(0, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
#else
	/*
	 * EVA does not have FPU EVA instructions so saving fpu context
	 * directly does not work.
	 */
	disable_msa();
	lose_fpu(1);
	err = save_fp_context(sc); /* this might fail */
	if (save_msa && !err)
		err = copy_msa_to_sigcontext(sc);
#endif
	return err;
}

static int protected_restore_fp_context(struct sigcontext __user *sc,
					unsigned used_math)
{
	int err, tmp __maybe_unused;
	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
#ifndef CONFIG_EVA
	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			if (restore_msa && !err) {
				enable_msa();
				err = _restore_msa_context(sc);
			} else {
				/* signal handler may have used MSA */
				disable_msa();
			}
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
			if (!err && (used_math & USEDMATH_MSA))
				err = copy_msa_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &sc->sc_fpregs[0]) |
			__get_user(tmp, &sc->sc_fpregs[31]) |
			__get_user(tmp, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
#else
	/*
	 * EVA does not have FPU EVA instructions so restoring fpu context
	 * directly does not work.
	 */
	enable_msa();
	lose_fpu(0);
	err = restore_fp_context(sc); /* this might fail */
	if (restore_msa && !err)
		err = copy_msa_from_sigcontext(sc);
#endif
	return err;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	used_math = used_math() ? USEDMATH_FP : 0;
	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context. Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc, used_math);
	}
	return err;
}

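/*
 * Check the FCSR saved in the sigcontext for exceptions that the signal
 * handler left both enabled and pending.  In the MIPS FCSR the cause bits
 * sit five bits above the matching enable bits, so
 * ((csr & FPU_CSR_ALL_E) << 5) selects exactly the cause bits whose
 * exceptions are enabled; FPU_CSR_UNI_X (unimplemented operation) can never
 * be masked.  Any such pending cause bit is cleared and SIGFPE reported,
 * since writing an FCSR with an enabled cause bit set would trap at once.
 */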
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
{
	int err, sig;

	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
	if (err > 0)
		err = 0;
	err |= protected_restore_fp_context(sc, used_math);
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc, used_math);
	} else {
		/* signal handler may have used FPU or MSA. Disable them. */
		disable_msa();
		lose_fpu(0);
	}

	return err;
}

void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16-bytes before the next lowest
	 * 16 byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	/* This is the X/Open sanctioned signal stack switching.  */
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

	return (void __user *)((sp - frame_size) &
			       (ICACHE_REFILLS_WORKAROUND_WAR ?
				~(cpu_icache_line_size() - 1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

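/*
 * The sigreturn handlers are reached through the VDSO trampoline once the
 * signal handler returns.  The user stack pointer still points at the frame
 * laid out by setup_frame()/setup_rt_frame(), so the saved sigmask and
 * register state are restored from there.  Because the entire pt_regs has
 * been rewritten, the normal syscall return path cannot be used; instead $29
 * is pointed back at the saved registers and control jumps straight to
 * syscall_exit.
 */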
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}

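/*
 * Build the signal frame on the user stack and redirect the saved register
 * state so that the next return to user mode enters the handler: $a0-$a2
 * carry the handler arguments, $29 points at the new frame, $31 is aimed at
 * the VDSO sigreturn trampoline, and $25/EPC at the handler itself ($25 is
 * required so position-independent handlers can compute their GOT pointer).
 */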
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
		       struct pt_regs *regs, int signr, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
#endif

static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
			  struct pt_regs *regs, int signr, sigset_t *set,
			  siginfo_t *info)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame	= setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall
};

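/*
 * Deliver one signal to the current task.  If the task was interrupted in a
 * system call, regs->regs[0] still holds the syscall number saved by the
 * syscall entry code and regs->regs[26] the original $a3; depending on the
 * error code in $v0 and on SA_RESTART, the syscall is either made to return
 * -EINTR or set up to be restarted by rewinding cp0_epc over the syscall
 * instruction.
 */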
static void handle_signal(unsigned long sig, siginfo_t *info,
	struct k_sigaction *ka, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
	void *vdso;
	unsigned int tmp = (unsigned int)current->mm->context.vdso;

	set_isa16_mode(tmp);
	vdso = (void *)tmp;
#else
	void *vdso = current->mm->context.vdso;
#endif

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	if (sig_uses_siginfo(ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ka, regs, sig, oldset, info);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset,
				       ka, regs, sig, oldset);

	if (ret)
		return;

	signal_delivered(sig, info, ka, regs, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(signr, &info, &ka, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}

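/*
 * On SMP the save/restore path cannot be chosen once at boot, presumably
 * because not every CPU in the system need have a hardware FPU.  These
 * wrappers therefore pick between the assembly routines and the software
 * copy on every call, based on raw_cpu_has_fpu for the CPU currently
 * executing the task.
 */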
#ifdef CONFIG_SMP
#ifndef CONFIG_EVA
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif /* CONFIG_EVA */
#endif

static int signal_setup(void)
{
#ifndef CONFIG_EVA
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */
#else
	save_fp_context = copy_fp_to_sigcontext;
	restore_fp_context = copy_fp_from_sigcontext;
#endif

	return 0;
}

arch_initcall(signal_setup);