/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "linux-user/trace.h"

/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif

/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /* Altivec vector registers.  The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64.
         */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one.  This pad fixes it up.  This is also why the
         * vector regs are referenced by the v_regs pointer above, so
         * any amount of padding can be added here.
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes.  */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-byte alignment that would result from it.  This would have
         * the effect of making the whole struct target_mcontext aligned,
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo;     /* struct siginfo __user * */
    target_ulong puc;       /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad

/* See arch/powerpc/kernel/signal.c.  */
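/*
 * target_sigsp() (from signal-common.h) picks the stack to use: the
 * alternate signal stack when the handler was installed with SA_ONSTACK
 * and one is available, otherwise the current stack pointer.
 * get_sigframe() then carves the frame out of that stack and rounds the
 * address down to a 16-byte boundary, as the PPC ABIs require for r1.
 */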
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}

#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif

static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= MSR_VR;
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= MSR_SPE;
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR.  */
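    /* msr may have MSR_VR/MSR_SPE OR'd in above, telling the guest which
       parts of mc_vregs actually hold valid data.  */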
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
    if (sigret) {
        __put_user(0x38000000 | sigret, &tramp[0]);
        __put_user(0x44000002, &tramp[1]);
    }
}

static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* VRSAVE is stored right after the vector registers; its slot
           differs between ppc64 and ppc32.  */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  The kernel only restores the high half.  */
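    /* On ppc64 the SPE high halves live in the upper 32 bits of the 64-bit
       GPRs, hence the shift-and-merge below; on ppc32 they sit in the
       separate gprh[] array.  */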
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}

#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack.  */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g(&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set.  */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
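    /* The guest's previous MSR_LE setting was captured by save_user_regs()
       above and is put back by restore_user_regs() on sigreturn.  */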
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);
}

#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

#if defined(TARGET_PPC64)
    set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
#endif
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */

/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext,
                                                 tuc_sigmask),
                       sizeof(set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

/* This syscall implements {get,set,swap}context for userland.  */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = (TaskState *)thread_cpu->opaque;

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -TARGET_QEMU_ESIGRETURN;
    }

    return 0;
}