/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>

#include "signal-common.h"

static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */

	/* Matches struct ucontext from its uc_mcontext field onwards */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
	unsigned long long sf_extcontext[0];
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;
}
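
/*
 * Added commentary (not in the original source): the stride above depends
 * on the FPU register model. With TIF_32BIT_FPREGS set (FR=0 mode), odd
 * FP registers are the upper halves of the even ones, so the loops step by
 * two and each 64-bit access to fpr[i] moves an even/odd register pair.
 * With 64-bit registers (FR=1) the stride is one, i.e. roughly:
 *
 *	inc == 2:  sc_fpregs[0], sc_fpregs[2], ..., sc_fpregs[30]
 *	inc == 1:  sc_fpregs[0], sc_fpregs[1], ..., sc_fpregs[31]
 */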

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _restore_fp_context(fpregs, csr);
}

/*
 * Extended context handling.
 */

static inline void __user *sc_to_extcontext(void __user *sc)
{
	struct ucontext __user *uc;

	/*
	 * We can just pretend the sigcontext is always embedded in a struct
	 * ucontext here, because the offset from sigcontext to extended
	 * context is the same in the struct sigframe case.
	 */
	uc = container_of(sc, struct ucontext, uc_mcontext);
	return &uc->uc_extcontext;
}

static int save_msa_extcontext(void __user *buf)
{
	struct msa_extcontext __user *msa = buf;
	uint64_t val;
	int i, err;

	if (!thread_msa_context_live())
		return 0;

	/*
	 * Ensure that we can't lose the live MSA context between checking
	 * for it & writing it to memory.
	 */
	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be saved to kernel
		 * memory and then copied to user memory. The save to kernel
		 * memory should already have been done when handling scalar
		 * FP context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		err = __put_user(read_msa_csr(), &msa->csr);
		err |= _save_msa_all_upper(&msa->wr);

		preempt_enable();
	} else {
		preempt_enable();

		err = __put_user(current->thread.fpu.msacsr, &msa->csr);

		for (i = 0; i < NUM_FPU_REGS; i++) {
			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
			err |= __put_user(val, &msa->wr[i]);
		}
	}

	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
	err |= __put_user(sizeof(*msa), &msa->ext.size);

	return err ? -EFAULT : sizeof(*msa);
}

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	struct msa_extcontext __user *msa = buf;
	unsigned long long val;
	unsigned int csr;
	int i, err;

	if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
		return SIGSYS;

	if (size != sizeof(*msa))
		return -EINVAL;

	err = get_user(csr, &msa->csr);
	if (err)
		return err;

	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be copied to kernel
		 * memory and later loaded to registers. The same is true of
		 * scalar FP context, so FPU & MSA should have already been
		 * disabled whilst handling scalar FP context.
		 */
		BUG_ON(IS_ENABLED(CONFIG_EVA));

		write_msa_csr(csr);
		err |= _restore_msa_all_upper(&msa->wr);
		preempt_enable();
	} else {
		preempt_enable();

		current->thread.fpu.msacsr = csr;

		for (i = 0; i < NUM_FPU_REGS; i++) {
			err |= __get_user(val, &msa->wr[i]);
			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
		}
	}

	return err;
}
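
/*
 * Layout sketch (added for illustration; derived from struct extcontext and
 * struct msa_extcontext): each extended context record starts with a
 * { magic, size } header, and the sequence is terminated by a lone
 * END_EXTCONTEXT_MAGIC word:
 *
 *	[ MSA_EXTCONTEXT_MAGIC | size | csr | wr[0..31] ]
 *	[ END_EXTCONTEXT_MAGIC ]
 *
 * restore_extcontext() below walks records by this header, so a new record
 * type only needs a unique magic value and an accurate size field.
 */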

static int save_extcontext(void __user *buf)
{
	int sz;

	sz = save_msa_extcontext(buf);
	if (sz < 0)
		return sz;
	buf += sz;

	/* If no context was saved then trivially return */
	if (!sz)
		return 0;

	/* Write the end marker */
	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
		return -EFAULT;

	sz += sizeof(((struct extcontext *)NULL)->magic);
	return sz;
}

static int restore_extcontext(void __user *buf)
{
	struct extcontext ext;
	int err;

	while (1) {
		err = __get_user(ext.magic, (unsigned int *)buf);
		if (err)
			return err;

		if (ext.magic == END_EXTCONTEXT_MAGIC)
			return 0;

		err = __get_user(ext.size, (unsigned int *)(buf
			+ offsetof(struct extcontext, size)));
		if (err)
			return err;

		switch (ext.magic) {
		case MSA_EXTCONTEXT_MAGIC:
			err = restore_msa_extcontext(buf, ext.size);
			break;

		default:
			err = -EINVAL;
			break;
		}

		if (err)
			return err;

		buf += ext.size;
	}
}

/*
 * Helper routines
 */
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used, ext_sz;
	int err;

	used = used_math() ? USED_FP : 0;
	if (!used)
		goto fp_done;

	if (!test_thread_flag(TIF_32BIT_FPREGS))
		used |= USED_FR1;
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		used |= USED_HYBRID_FPRS;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			return err;	/* really bad sigcontext */
	}

fp_done:
	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
	if (err < 0)
		return err;
	used |= ext_sz ? USED_EXTCONTEXT : 0;

	return __put_user(used, used_math);
}

int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig = 0, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used & USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !(used & USED_FP))
		lose_fpu(0);
	if (err)
		return err;
	if (!(used & USED_FP))
		goto fp_done;

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (IS_ENABLED(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

fp_done:
	if (!err && (used & USED_EXTCONTEXT))
		err = restore_extcontext(sc_to_extcontext(sc));

	return err ?: sig;
}
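
/*
 * Note on the retry loops above (added commentary, describing an assumption
 * about lock_fpu_owner()): while the FPU owner lock is held, page faults on
 * the user sigcontext presumably fail immediately rather than being
 * serviced. On failure the loop therefore touches the first FP register
 * slot, the last one and the CSR word with ordinary user accesses to fault
 * the page(s) in, then retries; only if those accesses also fail is the
 * sigcontext treated as genuinely bad.
 */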

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}

static size_t extcontext_max_size(void)
{
	size_t sz = 0;

	/*
	 * The assumption here is that between this point & the point at which
	 * the extended context is saved the size of the context should only
	 * ever be able to shrink (if the task is preempted), but never grow.
	 * That is, what this function returns is an upper bound on the size
	 * of the extended context for the current task at the current time.
	 */

	if (thread_msa_context_live())
		sz += sizeof(struct msa_extcontext);

	/* If any context is saved then we'll append the end marker */
	if (sz)
		sz += sizeof(((struct extcontext *)NULL)->magic);

	return sz;
}

int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}
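
/*
 * Worked example for fpcsr_pending() above (added commentary): in the MIPS
 * FCSR the cause bits sit five bits above the matching enable bits, hence
 * "(csr & FPU_CSR_ALL_E) << 5"; FPU_CSR_UNI_X (unimplemented operation) has
 * no enable bit and is always significant. If a handler returns with an
 * enabled exception's cause bit still set, resuming userland would re-trap
 * immediately, so the pending bits are cleared and SIGFPE is delivered
 * instead.
 */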

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Leave space for potential extended context */
	frame_size += extcontext_max_size();

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have its own trampoline active just
	 * above the user stack, 16-bytes before the next lowest
	 * 16 byte boundary. Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif
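
/*
 * Delivery/return flow in brief (added commentary, not ABI documentation):
 * setup_frame()/setup_rt_frame() below build the signal frame at the new
 * $29, point $25/EPC at the handler and $31 at the vDSO sigreturn
 * trampoline; returning from the handler therefore enters sys_sigreturn()
 * or sys_rt_sigreturn(), which unpick the frame again.
 */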

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(void)
{
	struct sigframe __user *frame;
	struct pt_regs *regs;
	sigset_t blocked;
	int sig;

	regs = current_pt_regs();
	frame = (struct sigframe __user *)regs->regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(void)
{
	struct rt_sigframe __user *frame;
	struct pt_regs *regs;
	sigset_t set;
	int sig;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif
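
/*
 * Hypothetical userland fragment (illustration only) that would be routed
 * through setup_rt_frame() below, since SA_SIGINFO asks for a frame
 * carrying siginfo and a ucontext:
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * The handler's third argument then points at rs_uc within the frame.
 */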

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
#endif
	.setup_rt_frame	= setup_rt_frame,
	.restart	= __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),

	.vdso		= &vdso_image,
};

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	/*
	 * If we were emulating a delay slot instruction, exit that frame such
	 * that addresses in the sigframe are as expected for userland and we
	 * don't have a problem if we reuse the thread's frame for an
	 * instruction within the signal handler.
	 */
	dsemul_thread_rollback(regs);

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	rseq_signal_deliver(ksig, regs);

	if (sig_uses_siginfo(&ksig->ka, abi))
		ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
				       ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
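
/*
 * Syscall restart convention (added commentary): on MIPS a non-zero
 * regs->regs[0] marks a context interrupted inside a syscall. The original
 * syscall number is stashed there and the original a3 in regs->regs[26],
 * since the real $0 and $k0 carry no userland state. Restarting re-loads
 * v0/a3 from those slots and backs cp0_epc up by 4 to re-execute the
 * syscall instruction, as in handle_signal() above and do_signal() below.
 */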

static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(NULL, regs);
	}

	user_enter();
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

static int signal_setup(void)
{
	/*
	 * The offset from sigcontext to extended context should be the same
	 * regardless of the type of signal, such that userland can always
	 * know where to look if it wishes to find the extended context
	 * structures.
	 */
	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
		      offsetof(struct sigframe, sf_sc)) !=
		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */

	return 0;
}

arch_initcall(signal_setup);