/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#else
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_sigsuspend		compat_sys_sigsuspend
#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_sigaction		compat_sys_sigaction
#define sys_swapcontext		compat_sys_swapcontext
#define sys_sigreturn		compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs. This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage.
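	 * For example (illustrative values only): a 64-bit set->sig[0] of
	 * 0x0000000200000100 comes back from userspace as s32.sig[0] =
	 * 0x00000100 and s32.sig[1] = 0x00000002, so each 64-bit word is
	 * rebuilt from two adjacent 32-bit words below.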
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (get_user(handler, &act->sa_handler) ||
	    __get_user(restorer, &act->sa_restorer) ||
	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
	    __get_user(mask, &act->sa_mask))
		return -EFAULT;
	new_ka->sa.sa_handler = compat_ptr(handler);
	new_ka->sa.sa_restorer = compat_ptr(restorer);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	old_sigset_t mask;

	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
	    __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
	    __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
		return -EFAULT;
	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
	__get_user(mask, &act->sa_mask);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
			     PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
			     GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}

#endif /* CONFIG_PPC64 */

/*
 * Atomically swap in the new signal mask, and wait for a signal.
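 *
 * The caller's old mask is parked in current->saved_sigmask and is put
 * back by the TIF_RESTORE_SIGMASK handling on the return path, after any
 * pending signal has been delivered with the temporary mask in place.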
 */
long sys_sigsuspend(old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}

long sys_sigaction(int sig, struct old_sigaction __user *act,
		struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

#ifdef CONFIG_PPC64
	if (sig < 0)
		sig = -sig;
#endif

	if (act) {
		if (get_old_sigaction(&new_ka, act))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int		abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int		abigap[56];
};

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
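 * When they are saved, MSR_VEC/MSR_SPE is also set in the saved MSR value
 * so that restore_user_regs() knows frame->mc_vregs holds valid data.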
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
		int sigret)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general and floating-point registers */
	if (save_general_regs(regs, frame) ||
	    __copy_to_user(&frame->mc_fregs, current->thread.fpr,
			   ELF_NFPREG * sizeof(double)))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr. That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/* force the process to reload the FP registers from
	   current->thread when it next does FP instructions */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
			     sizeof(sr->mc_fregs)))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* force the process to reload the altivec registers from
	   current->thread when it next does altivec instructions */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC64
long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
		struct sigaction32 __user *oact, size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}
	return ret;
}

/*
 * Note: it is necessary to treat how as an unsigned int, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the register representation
 * of a signed int (msr in 32-bit mode) and the register representation
 * of a signed int (msr in 64-bit mode) is performed.
 */
long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
		compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	sigset_t __user *up;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (get_sigset_t(&s, set))
			return -EFAULT;
	}

	set_fs(KERNEL_DS);
	/* This is valid because of the set_fs() */
	up = (sigset_t __user *) &s;
	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ?
				 up : NULL, sigsetsize);
	set_fs(old_fs);
	if (ret)
		return ret;
	if (oset) {
		if (put_sigset_t(oset, &s))
			return -EFAULT;
	}
	return 0;
}

long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
{
	sigset_t s;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		if (put_sigset_t(set, &s))
			return -EFAULT;
	}
	return ret;
}


int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
	int err;

	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(s->si_code >> 16) {
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

/*
 * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding cast to a signed int to ensure that the proper conversion
 * (sign extension) between the register representation of a signed int
 * (msr in 32-bit mode) and the register representation of a signed int
 * (msr in 64-bit mode) is performed.
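 *
 * For example, a negative pid such as -1 passed by 32-bit userspace arrives
 * here as the u32 value 0xffffffff; the (int) casts below turn it back into
 * -1 before it is handed to sys_rt_sigqueueinfo().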
 */
long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_from_user(&info, uinfo, 3*sizeof(int)) ||
	    copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;
	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
	set_fs(old_fs);
	return ret;
}
/*
 * Start Alternate signal stack support
 *
 * System Calls
 *	sigaltstack		compat_sys_sigaltstack
 */

int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
		int r6, int r7, int r8, struct pt_regs *regs)
{
	stack_32_t __user * newstack = compat_ptr(__new);
	stack_32_t __user * oldstack = compat_ptr(__old);
	stack_t uss, uoss;
	int ret;
	mm_segment_t old_fs;
	unsigned long sp;
	compat_uptr_t ss_sp;

	/*
	 * set sp to the user stack on entry to the system call
	 * the system call router sets R9 to the saved registers
	 */
	sp = regs->gpr[1];

	/* Put new stack info in local 64 bit stack struct */
	if (newstack) {
		if (get_user(ss_sp, &newstack->ss_sp) ||
		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
		    __get_user(uss.ss_size, &newstack->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ss_sp);
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* The __user pointer casts are valid because of the set_fs() */
	ret = do_sigaltstack(
			newstack ? (stack_t __user *) &uss : NULL,
			oldstack ? (stack_t __user *) &uoss : NULL,
			sp);
	set_fs(old_fs);
	/* Copy the stack information to the user output buffer */
	if (!ret && oldstack &&
	    (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
	     __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
	     __put_user(uoss.ss_size, &oldstack->ss_size)))
		return -EFAULT;
	return ret;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
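 *
 * The frame holds the siginfo, the ucontext and the saved user registers;
 * r1 is pointed at a fresh stack frame just below it and r3-r6 are loaded
 * with the handler's arguments before returning to user mode.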
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
			  &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, frame, 0))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
			goto badframe;
		regs->link = (unsigned long) frame->tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
	return 0;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	restore_sigmask(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;

	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;

	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
		    || save_user_regs(regs, mctx, 0)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		      struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it. -- paulus
	 */
#ifdef CONFIG_PPC64
	/*
	 * We use the compat_sys_ version that does the 32/64 bits conversion
	 * and takes userland pointer directly. What about error checking ?
	 * nobody does any...
	 */
	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
#else
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_msr &= ~MSR_DE;
				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers. After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	current->thread.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return. But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.
	 *  -- paulus
	 */
	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, &frame->mctx, 0))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
			goto badframe;
		regs->link = (unsigned long) frame->mctx.tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;

	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		   struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	restore_sigmask(&set);

	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}