/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Copy the thread's saved FPU context to/from a signal context presumed to
 * be on the user stack, and therefore accessed with the appropriate
 * user-access macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
{
	int i;
	int err = 0;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
				  &sc->sc_fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);

	return err;
}

static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
{
	int i;
	int err = 0;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);

	return err;
}

/*
 * Helper routines
 */
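/*
 * The protected_*_fp_context() helpers below transfer FPU state to/from the
 * user-space sigcontext.  While this thread owns the FPU the hardware
 * registers are stored/loaded directly via (save|restore)_fp_context(); that
 * direct access can fail if the sigcontext page is not resident (page faults
 * presumably cannot be serviced while the FPU owner lock is held).  In that
 * case the lock is dropped, the first/last FP slots and the CSR word are
 * touched with dummy accesses to fault the pages in, and the operation is
 * retried.  If even the dummy accesses fail, the sigcontext is genuinely bad
 * and the error is returned to the caller.
 */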
static int protected_save_fp_context(struct sigcontext __user *sc)
{
	int err;
#ifndef CONFIG_EVA
	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &sc->sc_fpregs[0]) |
			__put_user(0, &sc->sc_fpregs[31]) |
			__put_user(0, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
#else
	/*
	 * There are no EVA variants of the FPU load/store instructions, so
	 * the FPU context cannot be saved directly to user memory.  Give up
	 * the FPU and copy from the thread's saved state instead.
	 */
	lose_fpu(1);
	err = save_fp_context(sc); /* this might fail */
#endif
	return err;
}

static int protected_restore_fp_context(struct sigcontext __user *sc)
{
	int err, tmp __maybe_unused;
#ifndef CONFIG_EVA
	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &sc->sc_fpregs[0]) |
			__get_user(tmp, &sc->sc_fpregs[31]) |
			__get_user(tmp, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
#else
	/*
	 * There are no EVA variants of the FPU load/store instructions, so
	 * the FPU context cannot be restored directly from user memory.
	 */
	lose_fpu(0);
	err = restore_fp_context(sc); /* this might fail */
#endif
	return err;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	used_math = !!used_math();
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context.  Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc);
	}
	return err;
}
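/*
 * In the FCSR the exception Enable bits sit five bits below the matching
 * Cause bits, so (csr & FPU_CSR_ALL_E) << 5 yields the Cause bits whose
 * exceptions are enabled; FPU_CSR_UNI_X (unimplemented operation) is always
 * fatal and is checked unconditionally.  If the signal handler left any such
 * Cause bit set, restoring the FCSR as-is would immediately re-raise the FP
 * exception, so those bits are cleared and SIGFPE is reported instead.
 */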
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
	int err, sig;

	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
	if (err > 0)
		err = 0;
	err |= protected_restore_fp_context(sc);
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU.  Give it up. */
		lose_fpu(0);
	}

	return err;
}

void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * The FPU emulator may have its own trampoline active just above the
	 * user stack, 16 bytes before the next lowest 16-byte boundary.  Try
	 * to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	return (void __user *)((sp - frame_size) &
			       (ICACHE_REFILLS_WORKAROUND_WAR ?
				~(cpu_icache_line_size() - 1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif
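/*
 * The sigreturn entry points below are reached through the signal trampoline
 * in the VDSO once the handler returns.  $29 still points at the sigframe
 * that was set up by setup_frame()/setup_rt_frame(), so the frame can be
 * read back and the blocked mask and register state restored; the inline
 * assembly then reloads $29 with the address of the saved pt_regs and jumps
 * to syscall_exit, so the usual return path reloads the restored registers
 * on the way back to user mode.
 */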
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		: /* no outputs */
		: "r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
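/*
 * setup_frame()/setup_rt_frame() build the user-stack sigframe and redirect
 * the user context to the handler.  sig_return is the address of the
 * sigreturn trampoline in the VDSO (located via the offsets recorded in
 * struct mips_abi below); it is installed in $31 so that returning from the
 * handler enters the trampoline, which issues the matching sigreturn system
 * call.
 */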
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame	= setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall
};
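/*
 * Syscall-restart bookkeeping: the MIPS syscall entry path saves the syscall
 * number in pt_regs->regs[0] (normally always zero, since $0 is the zero
 * register) and the original a3 in regs[26].  A non-zero regs[0] in
 * handle_signal()/do_signal() therefore means a system call was interrupted;
 * to restart it, v0 (regs[2]) and a3 (regs[7]) are reloaded from those saved
 * copies and cp0_epc is wound back by 4 so the syscall instruction is
 * re-executed.  regs[0] is then cleared so the restart is attempted only
 * once.
 */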
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
	void *vdso;
	unsigned int tmp = (unsigned int)current->mm->context.vdso;

	set_isa16_mode(tmp);
	vdso = (void *)tmp;
#else
	void *vdso = current->mm->context.vdso;
#endif

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	if (sig_uses_siginfo(&ksig->ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
				       regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	restore_saved_sigmask();
}

/*
 * Notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}

#ifdef CONFIG_SMP
#ifndef CONFIG_EVA
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif /* CONFIG_EVA */
#endif

static int signal_setup(void)
{
#ifndef CONFIG_EVA
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */
#else
	save_fp_context = copy_fp_to_sigcontext;
	restore_fp_context = copy_fp_from_sigcontext;
#endif

	return 0;
}

arch_initcall(signal_setup);