/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>

#include "signal.h"

#define DEBUG_SIG 0

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	3
#define TRAMP_SIZE	6

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */

struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
	char abigap[288];
} __attribute__ ((aligned (16)));

static const char fmt32[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
static const char fmt64[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";

/*
 * Set up the sigcontext for the signal frame.
 */

static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 int signr, sigset_t *set, unsigned long handler,
		 int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context).  This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
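	/* vmx_reserve is not guaranteed to be 16-byte aligned, so round
	 * its address up to the next 16-byte boundary before saving VMX
	 * registers there: (addr + 15) & ~0xf, e.g. 0x...1008 rounds up
	 * to 0x...1010.  VMX load/store instructions assume 16-byte
	 * alignment.
	 */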
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, current->thread.vr,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate that
		 * sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(current);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, current);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim to gather both sets of registers and set up the
 * 'normal' sigcontext registers with rolled-back register values such that a
 * simple signal handler sees a correct checkpointed register state.
 * If interested, a TM-aware sighandler can examine the transactional registers
 * in the 2nd sigcontext to determine the real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct pt_regs *regs,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context).  This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
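	/* Frame layout reminder (see handle_rt_signal64): sc lives in
	 * frame->uc and receives the checkpointed state; tm_sc lives in
	 * frame->uc_transact, reachable from userland via uc.uc_link,
	 * and receives the transactional state.
	 */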
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	/* tm_reclaim rolls back all reg states, saving checkpointed (older)
	 * GPRs to thread.ckpt_regs and (if used) FPRs to (newer)
	 * thread.transact_fp and/or VRs to (newer) thread.transact_vr.
	 * THEN we save out FP/VRs, if necessary, to the checkpointed (older)
	 * thread.fr[]/vr[]s.  The transactional (newer) GPRs are on the
	 * stack, in *regs.
	 */
	tm_enable();
	tm_reclaim(&current->thread, msr, TM_CAUSE_SIGNAL);

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, current->thread.vr,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      current->thread.transact_vr,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      current->thread.vr,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(current->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
	if (msr & MSR_FP)
		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
	else
		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_vsx_to_user(v_regs, current);

		if (msr & MSR_VSX)
			err |= copy_transact_vsx_to_user(tm_v_regs, current);
		else
			err |= copy_vsx_to_user(tm_v_regs, current);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
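
	/* GPRs: the live (transactional) values are still in *regs and
	 * go to the transactional context; the checkpointed values that
	 * tm_reclaim() saved to thread.ckpt_regs go to the normal one.
	 */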
	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &current->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif

/*
 * Restore the sigcontext from the signal frame.
 */

static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
			      struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long err = 0;
	unsigned long save_r13 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
	err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
	err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
	err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
	/* skip SOFTE */
	regs->trap = 0;
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != 0 && (msr & MSR_VEC) != 0)
		err |= __copy_from_user(current->thread.vr, v_regs,
					33 * sizeof(vector128));
	else if (current->thread.used_vr)
		memset(current->thread.vr, 0, 33 * sizeof(vector128));
	/* Always get VRSAVE back */
	if (v_regs != 0)
		err |= __get_user(current->thread.vrsave,
				  (u32 __user *)&v_regs[33]);
	else
		current->thread.vrsave = 0;
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
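	/* Layout note: v_regs points at 34 vector128 slots (vr0-31,
	 * vscr, vrsave); the 32 VSX low doublewords follow immediately
	 * after, at v_regs + ELF_NVRREG.
	 */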
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data.  Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0)
		err |= copy_vsx_from_user(current, v_regs);
	else
		for (i = 0; i < 32; i++)
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct pt_regs *regs,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif
	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(current->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(current->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(current->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(current->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);

	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
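	/* With these bits clear, the first FP/VMX/VSX instruction after
	 * the return traps into the kernel, which then loads the state
	 * we are about to write into the thread_struct.
	 */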
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(VERIFY_READ,
				    tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(current->thread.vr, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
					33 * sizeof(vector128));
	} else if (current->thread.used_vr) {
		memset(current->thread.vr, 0, 33 * sizeof(vector128));
		memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != 0 && tm_v_regs != 0) {
		err |= __get_user(current->thread.vrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	} else {
		current->thread.vrsave = 0;
		current->thread.transact_vrsave = 0;
	}
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
	err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data.  Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(current, v_regs);
		err |= copy_transact_vsx_from_user(current, tm_v_regs);
	} else {
		for (i = 0; i < 32; i++) {
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);
	/* The task has moved into TM state S (suspended), so ensure the
	 * MSR reflects this; __MASK(33) is MSR_TS_S.
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | __MASK(33);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}

	return err;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
	err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
	/* li r0, __NR_[rt_]sigreturn */
	err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
	/* sc */
	err |= __put_user(0x44000002UL, &tramp[2]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
				   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
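/*
 * (The VSX region at the end of the ucontext is one doubleword per
 * VSR low half, i.e. 32*sizeof(long) bytes, hence the subtraction.)
 */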
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))

/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
	unsigned char tmp;
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state, fail the swap.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
					ctx_has_vsx_region)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t)))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);
	if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}


/*
 * Do a signal return; undo the signal stack.
 */

int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_SUSPENDED(msr)) {
		/* We recheckpoint on return. */
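		/* The frame's first context holds the checkpointed state;
		 * its uc_link was set up at delivery time to point at the
		 * transactional context, and both are needed here.
		 */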
		struct ucontext __user *uc_transact;
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	} else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
#if DEBUG_SIG
	printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
	       regs, uc, &uc->uc_mcontext);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
		       sigset_t *set, struct pt_regs *regs)
{
	/* Handler is *really* a pointer to the function descriptor for
	 * the signal routine.  The first entry in the function
	 * descriptor is the entry address of the signal routine and the
	 * second entry is the TOC value we need to use.
	 */
	func_descr_t __user *funct_desc_ptr;
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);
	if (err)
		goto badframe;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    regs, signr,
					    NULL,
					    (unsigned long)ka->sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr,
					NULL, (unsigned long)ka->sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	current->thread.fpscr.val = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state:
	 */
	regs->msr &= ~MSR_TS_MASK;
#endif

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}
	funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;

	/* Allocate a dummy caller frame for the signal handler. */
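	/* Storing the old r1 at *newsp below gives the dummy frame a
	 * valid back chain, so unwinders can walk from the handler back
	 * through the signal frame to the interrupted code.
	 */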
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	err |= get_user(regs->nip, &funct_desc_ptr->entry);
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->gpr[1] = newsp;
	err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	regs->gpr[3] = signr;
	regs->result = 0;
	if (ka->sa.sa_flags & SA_SIGINFO) {
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 1;

badframe:
#if DEBUG_SIG
	printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	force_sigsegv(signr, current);
	return 0;
}