/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>

#include "signal-common.h"

static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */

	/* Matches struct ucontext from its uc_mcontext field onwards */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
	unsigned long long sf_extcontext[0];
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;
}

static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;
}
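
/*
 * A note on the stride used above (an informal sketch, not a statement
 * of the ABI): when TIF_32BIT_FPREGS is set the task runs with FR=0,
 * where each odd-numbered FP register holds the upper half of the
 * preceding even/odd pair, so only the even-numbered entries carry
 * independent 64-bit state and the copy loops step by two:
 *
 *	FR=1: fpregs[0..31]      <-> fpr[0..31], one 64-bit value each
 *	FR=0: fpregs[0,2,..,30]  <-> fpr[0,2,..,30], odd slots skipped
 */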

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _save_fp_context(fpregs, csr);
}

static int restore_hw_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;

	return _restore_fp_context(fpregs, csr);
}

/*
 * Extended context handling.
 */

static inline void __user *sc_to_extcontext(void __user *sc)
{
	struct ucontext __user *uc;

	/*
	 * We can just pretend the sigcontext is always embedded in a struct
	 * ucontext here, because the offset from sigcontext to extended
	 * context is the same in the struct sigframe case.
	 */
	uc = container_of(sc, struct ucontext, uc_mcontext);
	return &uc->uc_extcontext;
}

static int save_msa_extcontext(void __user *buf)
{
	struct msa_extcontext __user *msa = buf;
	uint64_t val;
	int i, err;

	if (!thread_msa_context_live())
		return 0;

	/*
	 * Ensure that we can't lose the live MSA context between checking
	 * for it & writing it to memory.
	 */
	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be saved to kernel
		 * memory and then copied to user memory. The save to kernel
		 * memory should already have been done when handling scalar
		 * FP context.
		 */
		BUG_ON(config_enabled(CONFIG_EVA));

		err = __put_user(read_msa_csr(), &msa->csr);
		err |= _save_msa_all_upper(&msa->wr);

		preempt_enable();
	} else {
		preempt_enable();

		err = __put_user(current->thread.fpu.msacsr, &msa->csr);

		for (i = 0; i < NUM_FPU_REGS; i++) {
			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
			err |= __put_user(val, &msa->wr[i]);
		}
	}

	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
	err |= __put_user(sizeof(*msa), &msa->ext.size);

	return err ? -EFAULT : sizeof(*msa);
}
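
/*
 * Layout note (illustrative, not a formal ABI description): each
 * extended context record written above is self-describing. It begins
 * with a struct extcontext header whose magic identifies the record
 * type and whose size gives the total record length in bytes, so a
 * parser can skip records it does not understand; the list ends with a
 * bare END_EXTCONTEXT_MAGIC word. A hypothetical userland walk over
 * the records might look like:
 *
 *	struct extcontext *ext = (void *)&uc->uc_extcontext;
 *
 *	while (ext->magic != END_EXTCONTEXT_MAGIC) {
 *		if (ext->magic == MSA_EXTCONTEXT_MAGIC)
 *			handle_msa((struct msa_extcontext *)ext);
 *		ext = (void *)ext + ext->size;
 *	}
 */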

static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	struct msa_extcontext __user *msa = buf;
	unsigned long long val;
	unsigned int csr;
	int i, err;

	if (size != sizeof(*msa))
		return -EINVAL;

	err = get_user(csr, &msa->csr);
	if (err)
		return err;

	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be copied to kernel
		 * memory and later loaded to registers. The same is true of
		 * scalar FP context, so FPU & MSA should have already been
		 * disabled whilst handling scalar FP context.
		 */
		BUG_ON(config_enabled(CONFIG_EVA));

		write_msa_csr(csr);
		err |= _restore_msa_all_upper(&msa->wr);
		preempt_enable();
	} else {
		preempt_enable();

		current->thread.fpu.msacsr = csr;

		for (i = 0; i < NUM_FPU_REGS; i++) {
			err |= __get_user(val, &msa->wr[i]);
			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
		}
	}

	return err;
}

static int save_extcontext(void __user *buf)
{
	int sz;

	sz = save_msa_extcontext(buf);
	if (sz < 0)
		return sz;
	buf += sz;

	/* If no context was saved then trivially return */
	if (!sz)
		return 0;

	/* Write the end marker */
	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
		return -EFAULT;

	sz += sizeof(((struct extcontext *)NULL)->magic);
	return sz;
}

static int restore_extcontext(void __user *buf)
{
	struct extcontext ext;
	int err;

	while (1) {
		err = __get_user(ext.magic, (unsigned int *)buf);
		if (err)
			return err;

		if (ext.magic == END_EXTCONTEXT_MAGIC)
			return 0;

		err = __get_user(ext.size, (unsigned int *)(buf
			+ offsetof(struct extcontext, size)));
		if (err)
			return err;

		switch (ext.magic) {
		case MSA_EXTCONTEXT_MAGIC:
			err = restore_msa_extcontext(buf, ext.size);
			break;

		default:
			err = -EINVAL;
			break;
		}

		if (err)
			return err;

		buf += ext.size;
	}
}

/*
 * Helper routines
 */
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used, ext_sz;
	int err;

	used = used_math() ? USED_FP : 0;
	if (!used)
		goto fp_done;

	if (!test_thread_flag(TIF_32BIT_FPREGS))
		used |= USED_FR1;
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		used |= USED_HYBRID_FPRS;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			return err;	/* really bad sigcontext */
	}

fp_done:
	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
	if (err < 0)
		return err;
	used |= ext_sz ? USED_EXTCONTEXT : 0;

	return __put_user(used, used_math);
}
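
/*
 * The retry loop above copes with the sigcontext living in pageable
 * user memory: with the FPU owner lock held we must not sleep, so a
 * fault during save_fp_context() makes it bail out with an error
 * rather than wait for the page. Touching the first & last FP register
 * slots and the CSR word outside the lock faults the frame in, after
 * which the save is retried; only if that plain __put_user() also
 * fails is the sigcontext deemed genuinely bad.
 */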

int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig = 0, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used & USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !(used & USED_FP))
		lose_fpu(0);
	if (err)
		return err;
	if (!(used & USED_FP))
		goto fp_done;

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

fp_done:
	if (used & USED_EXTCONTEXT)
		err |= restore_extcontext(sc_to_extcontext(sc));

	return err ?: sig;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}

static size_t extcontext_max_size(void)
{
	size_t sz = 0;

	/*
	 * The assumption here is that between this point & the point at which
	 * the extended context is saved the size of the context should only
	 * ever be able to shrink (if the task is preempted), but never grow.
	 * That is, what this function returns is an upper bound on the size of
	 * the extended context for the current task at the current time.
	 */

	if (thread_msa_context_live())
		sz += sizeof(struct msa_extcontext);

	/* If any context is saved then we'll append the end marker */
	if (sz)
		sz += sizeof(((struct extcontext *)NULL)->magic);

	return sz;
}
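
/*
 * Worked example of the bound above: if the task has live MSA context
 * when the frame size is computed, the reservation works out to
 * sizeof(struct msa_extcontext) plus one u32 end marker. Should the
 * context cease to be live before save_extcontext() runs (the comment
 * above notes preemption can only shrink it), zero bytes are written
 * and the reserved space simply goes unused; the estimate may
 * overshoot but never undershoots.
 */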

int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Leave space for potential extended context */
	frame_size += extcontext_max_size();

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * The FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16-byte boundary. Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) &
			       (ICACHE_REFILLS_WORKAROUND_WAR ?
				~(cpu_icache_line_size() - 1) : ALMASK));
}
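
/*
 * Alignment note for get_sigframe(): the frame address is masked so
 * that it is at least ALMASK (natural long) aligned; on cores needing
 * the I-cache refill workaround the frame is instead aligned to a
 * whole icache line. The fixed 32-byte gap below the old stack
 * pointer is what protects any FPU-emulator trampoline sitting there.
 */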

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *)regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *)regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
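
/*
 * Neither sigreturn path above can simply return: the pt_regs they
 * have just rewritten are the kernel-stack copy of the interrupted
 * user state, so each jumps straight to syscall_exit with $29 pointing
 * at those registers, restoring the whole context in one go. A bad
 * frame instead raises SIGSEGV, and a pending FPU exception found by
 * fpcsr_pending() is surfaced as SIGFPE via force_sig().
 */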

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame	= setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};
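
/*
 * Signal delivery, sketched: if the interrupted instruction was a
 * syscall (regs->regs[0] still holds the original v0), the restart
 * convention is applied first. ERESTARTSYS restarts only under
 * SA_RESTART, ERESTARTNOINTR always restarts, and the rest fail with
 * EINTR; restarting means reloading v0/a3 from the copies stashed at
 * syscall entry and winding cp0_epc back 4 bytes so the syscall
 * instruction is re-executed. On microMIPS kernels the trampoline
 * address additionally gets its ISA bit set via set_isa16_mode().
 */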

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
	void *vdso;
	unsigned long tmp = (unsigned long)current->mm->context.vdso;

	set_isa16_mode(tmp);
	vdso = (void *)tmp;
#else
	void *vdso = current->mm->context.vdso;
#endif

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	if (sig_uses_siginfo(&ksig->ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
				       regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.  */
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again. */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

/*
 * Notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

static int signal_setup(void)
{
	/*
	 * The offset from sigcontext to extended context should be the same
	 * regardless of the type of signal, such that userland can always
	 * know where to look if it wishes to find the extended context
	 * structures.
	 */
	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
		      offsetof(struct sigframe, sf_sc)) !=
		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */

	return 0;
}

arch_initcall(signal_setup);