/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>

#include "signal-common.h"

static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */

	/* Matches struct ucontext from its uc_mcontext field onwards */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
	unsigned long long sf_extcontext[0];
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;
}
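
/*
 * A note on the stride used above: with TIF_32BIT_FPREGS set the task
 * sees 32 x 32-bit FP registers, which the kernel keeps packed in pairs
 * inside the even-numbered 64-bit fpr slots, so copying every second
 * slot covers the whole register file. An illustrative sketch (which of
 * the pair lands in the high or low word depends on the register model
 * and endianness; see get_fpr32()/set_fpr32()):
 *
 *	u64 pair = get_fpr64(&current->thread.fpu.fpr[0], 0);
 *	u32 a = pair & 0xffffffffu;	(one of $f0/$f1)
 *	u32 b = pair >> 32;		(the other)
 */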

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _restore_fp_context(fpregs, csr);
}

/*
 * Extended context handling.
 */

static inline void __user *sc_to_extcontext(void __user *sc)
{
	struct ucontext __user *uc;

	/*
	 * We can just pretend the sigcontext is always embedded in a struct
	 * ucontext here, because the offset from sigcontext to extended
	 * context is the same in the struct sigframe case.
	 */
	uc = container_of(sc, struct ucontext, uc_mcontext);
	return &uc->uc_extcontext;
}

static int save_msa_extcontext(void __user *buf)
{
	struct msa_extcontext __user *msa = buf;
	uint64_t val;
	int i, err;

	if (!thread_msa_context_live())
		return 0;

	/*
	 * Ensure that we can't lose the live MSA context between checking
	 * for it & writing it to memory.
	 */
	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be saved to kernel
		 * memory and then copied to user memory. The save to kernel
		 * memory should already have been done when handling scalar
		 * FP context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		err = __put_user(read_msa_csr(), &msa->csr);
		err |= _save_msa_all_upper(&msa->wr);

		preempt_enable();
	} else {
		preempt_enable();

		err = __put_user(current->thread.fpu.msacsr, &msa->csr);

		for (i = 0; i < NUM_FPU_REGS; i++) {
			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
			err |= __put_user(val, &msa->wr[i]);
		}
	}

	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
	err |= __put_user(sizeof(*msa), &msa->ext.size);

	return err ? -EFAULT : sizeof(*msa);
}
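
/*
 * The records written by save_msa_extcontext() form a simple
 * (magic, size, payload) sequence terminated by END_EXTCONTEXT_MAGIC.
 * A userland signal handler could, hypothetically, walk them like this
 * (sketch only; uc is the ucontext pointer passed as the third argument
 * of an SA_SIGINFO handler, and use_msa_context() is a made-up name):
 *
 *	struct extcontext *ext = (void *)&uc->uc_extcontext;
 *
 *	while (ext->magic != END_EXTCONTEXT_MAGIC) {
 *		if (ext->magic == MSA_EXTCONTEXT_MAGIC)
 *			use_msa_context((struct msa_extcontext *)ext);
 *		ext = (void *)ext + ext->size;
 *	}
 *
 * Unknown record types should simply be skipped via their size field.
 */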

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	struct msa_extcontext __user *msa = buf;
	unsigned long long val;
	unsigned int csr;
	int i, err;

	if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
		return SIGSYS;

	if (size != sizeof(*msa))
		return -EINVAL;

	err = get_user(csr, &msa->csr);
	if (err)
		return err;

	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be copied to kernel
		 * memory and later loaded to registers. The same is true of
		 * scalar FP context, so FPU & MSA should have already been
		 * disabled whilst handling scalar FP context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		write_msa_csr(csr);
		err |= _restore_msa_all_upper(&msa->wr);
		preempt_enable();
	} else {
		preempt_enable();

		current->thread.fpu.msacsr = csr;

		for (i = 0; i < NUM_FPU_REGS; i++) {
			err |= __get_user(val, &msa->wr[i]);
			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
		}
	}

	return err;
}

static int save_extcontext(void __user *buf)
{
	int sz;

	sz = save_msa_extcontext(buf);
	if (sz < 0)
		return sz;
	buf += sz;

	/* If no context was saved then trivially return */
	if (!sz)
		return 0;

	/* Write the end marker */
	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
		return -EFAULT;

	sz += sizeof(((struct extcontext *)NULL)->magic);
	return sz;
}

static int restore_extcontext(void __user *buf)
{
	struct extcontext ext;
	int err;

	while (1) {
		err = __get_user(ext.magic, (unsigned int *)buf);
		if (err)
			return err;

		if (ext.magic == END_EXTCONTEXT_MAGIC)
			return 0;

		err = __get_user(ext.size, (unsigned int *)(buf
			+ offsetof(struct extcontext, size)));
		if (err)
			return err;

		switch (ext.magic) {
		case MSA_EXTCONTEXT_MAGIC:
			err = restore_msa_extcontext(buf, ext.size);
			break;

		default:
			err = -EINVAL;
			break;
		}

		if (err)
			return err;

		buf += ext.size;
	}
}

/*
 * Helper routines
 */
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used, ext_sz;
	int err;

	used = used_math() ? USED_FP : 0;
	if (!used)
		goto fp_done;

	if (!test_thread_flag(TIF_32BIT_FPREGS))
		used |= USED_FR1;
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		used |= USED_HYBRID_FPRS;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			return err;	/* really bad sigcontext */
	}

fp_done:
	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
	if (err < 0)
		return err;
	used |= ext_sz ? USED_EXTCONTEXT : 0;

	return __put_user(used, used_math);
}
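
/*
 * A note on the retry loop above (protected_restore_fp_context() below
 * uses the same pattern): the fast path saves the live FPU registers
 * with the FPU owner lock held, where a fault on the user sigframe
 * cannot be serviced, so save_fp_context() just returns non-zero.
 * Touching the first & last FP slots plus the CSR with plain
 * __put_user() outside the lock either faults the frame in, after which
 * the retry should succeed, or fails outright & proves the sigcontext
 * genuinely bad.
 */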

int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig = 0, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used & USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !(used & USED_FP))
		lose_fpu(0);
	if (err)
		return err;
	if (!(used & USED_FP))
		goto fp_done;

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

fp_done:
	if (!err && (used & USED_EXTCONTEXT))
		err = restore_extcontext(sc_to_extcontext(sc));

	return err ?: sig;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}

static size_t extcontext_max_size(void)
{
	size_t sz = 0;

	/*
	 * The assumption here is that between this point & the point at which
	 * the extended context is saved the size of the context should only
	 * ever be able to shrink (if the task is preempted), but never grow.
	 * That is, what this function returns is an upper bound on the size of
	 * the extended context for the current task at the current time.
	 */

	if (thread_msa_context_live())
		sz += sizeof(struct msa_extcontext);

	/* If any context is saved then we'll append the end marker */
	if (sz)
		sz += sizeof(((struct extcontext *)NULL)->magic);

	return sz;
}
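
/*
 * Background for fpcsr_pending() below: in the MIPS FCSR the exception
 * enable bits occupy bits 7-11 and the corresponding cause bits occupy
 * bits 12-16, with the unimplemented-operation cause (which has no
 * enable and always traps) at bit 17 (FPU_CSR_UNI_X). Shifting the
 * enables left by 5 thus lines each one up with its cause bit; e.g. the
 * overflow enable at bit 9 maps to the overflow cause at bit 14. If a
 * handler returns with an enabled cause bit still raised, restoring
 * FCSR verbatim would immediately re-raise the FP exception, so the bit
 * is cleared and SIGFPE delivered instead.
 */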

int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Leave space for potential extended context */
	frame_size += extcontext_max_size();

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * The FPU emulator may have its own trampoline active just
	 * above the user stack, 16-bytes before the next lowest
	 * 16 byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) &
			       (ICACHE_REFILLS_WORKAROUND_WAR ?
				~(cpu_icache_line_size() - 1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif
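
/*
 * The legacy sigaction() below predates rt signals: only the first word
 * of the signal mask (an old_sigset_t) travels between kernel and
 * userland, which is why the remaining sa_mask words are explicitly
 * zeroed when the old action is copied back out.
 */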

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
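
/*
 * The round trip for one signal: setup_rt_frame() below builds the
 * frame on the user stack and points $31 at the sigreturn trampoline in
 * the VDSO, so a returning handler falls into something equivalent to
 * (illustrative; see arch/mips/vdso for the real trampoline):
 *
 *	li	v0, __NR_rt_sigreturn
 *	syscall
 *
 * which re-enters the kernel via sys_rt_sigreturn() above to unwind the
 * frame and restore the pre-signal register state.
 */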

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
#endif
	.setup_rt_frame	= setup_rt_frame,
	.restart	= __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),

	.vdso		= &vdso_image,
};
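
/*
 * Syscall restart bookkeeping used by handle_signal() and do_signal()
 * below: the syscall entry code stashes the incoming syscall number
 * (v0) in the pt_regs slot for $0 (regs[0], otherwise always zero) and
 * the original a3 in the slot for $26, since a3 is clobbered to carry
 * the error flag. A non-zero regs[0] therefore means "returning from a
 * syscall", and a restart puts the number back in v0 (regs[2]),
 * recovers a3 from regs[26], and backs cp0_epc up by one instruction so
 * the syscall instruction is re-executed.
 */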

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	/*
	 * If we were emulating a delay slot instruction, exit that frame such
	 * that addresses in the sigframe are as expected for userland and we
	 * don't have a problem if we reuse the thread's frame for an
	 * instruction within the signal handler.
	 */
	dsemul_thread_rollback(regs);

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	rseq_signal_deliver(regs);

	if (sig_uses_siginfo(&ksig->ka, abi))
		ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
				       ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(regs);
	}

	user_enter();
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

static int signal_setup(void)
{
	/*
	 * The offset from sigcontext to extended context should be the same
	 * regardless of the type of signal, such that userland can always
	 * know where to look if it wishes to find the extended context
	 * structures.
	 */
	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
		      offsetof(struct sigframe, sf_sc)) !=
		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */

	return 0;
}

arch_initcall(signal_setup);