/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "vdso-asmoffset.h"

/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];

#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
    /*
     * On ppc64, this mcontext structure is naturally *unaligned*,
     * or rather it is aligned on an 8-byte boundary but not on a
     * 16-byte boundary.  This pad fixes it up.  This is why we
     * cannot use ppc_avr_t, which would force alignment.  This is
     * also why the vector regs are referenced in the ABI by the
     * v_regs pointer above so any amount of padding can be added here.
     */
    target_ulong pad;
    /* VSCR and VRSAVE are saved separately.  Also reserve space for VSX. */
    struct {
        uint64_t altivec[34 + 16][2];
    } mc_vregs;
#else
    target_ulong mc_pad[2];

    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /*
         * Altivec vector registers.  One extra for VRSAVE.
         * On ppc32, we are already aligned to 16 bytes.  We could
         * use ppc_avr_t, but choose to share the same type as ppc64.
         */
        uint64_t altivec[33][2];
    } mc_vregs;
#endif
};

QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_fregs)
                  != offsetof_mcontext_fregs);
#if defined(TARGET_PPC64)
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, v_regs)
                  != offsetof_mcontext_vregs_ptr);
#else
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_vregs)
                  != offsetof_mcontext_vregs);
#endif

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15];  /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

#if !defined(TARGET_PPC64)
/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

QEMU_BUILD_BUG_ON(offsetof(struct target_sigframe, mctx)
                  != offsetof_sigframe_mcontext);
#endif

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo;  /* struct siginfo __user * */
    target_ulong puc;    /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe,
                           uc.tuc_sigcontext.mcontext)
                  != offsetof_rt_sigframe_mcontext);

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext)
                  != offsetof_rt_sigframe_mcontext);

#endif

#if defined(TARGET_PPC64)

struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

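/*
 * Rough sketch (not authoritative; derived from setup_frame and
 * setup_rt_frame below) of the guest stack once a signal is delivered,
 * higher addresses first:
 *
 *     old r1 (interrupted frame)
 *     target_sigframe / target_rt_sigframe    <- frame_addr, 16-byte aligned
 *     SIGNAL_FRAMESIZE gap (+16 for rt frames); the old r1 is stored
 *     at its base as the ABI back chain       <- new r1 seen by the handler
 */
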
/* See arch/powerpc/kernel/signal.c.  */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}

#if TARGET_BIG_ENDIAN == HOST_BIG_ENDIAN
#define PPC_VEC_HI  0
#define PPC_VEC_LO  1
#else
#define PPC_VEC_HI  1
#define PPC_VEC_LO  0
#endif

static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    uint32_t ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]);

    ccr = ppc_get_cr(env);
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

#if !defined(TARGET_PPC64)
    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
    __put_user(0x38000000 | sigret, &tramp[0]);
    __put_user(0x44000002, &tramp[1]);
}

static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong xer;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);

    __get_user(xer, &frame->mc_gregs[TARGET_PT_XER]);
    cpu_write_xer(env, xer);

    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
    ppc_set_cr(env, ccr);
    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        ppc_store_msr(env, ((env->msr & ~(1ull << MSR_LE)) |
                            (msr & (1ull << MSR_LE))));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(env_cpu(env), v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

#if !defined(TARGET_PPC64)
    /* Restore SPE registers.  The kernel only restores the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif
}

#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    env->lr = default_sigreturn;

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

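/*
 * For reference (a sketch, not part of the ABI structures above): a guest
 * handler installed with SA_SIGINFO is effectively entered as
 *
 *     void handler(int sig, siginfo_t *info, ucontext_t *uc);
 *
 * setup_rt_frame() below fills r3/r4/r5 accordingly and points lr at the
 * rt_sigreturn trampoline written by setup_sigtramp().
 */
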
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g(&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
#endif

    save_user_regs(env, mctx);

    env->lr = default_rt_sigreturn;

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points. R12 must also be set. */
        env->gpr[12] = env->nip = ka->_sa_handler;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

#if TARGET_BIG_ENDIAN
    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));
#else
    /* Signal handlers are entered in little-endian mode.  */
    ppc_store_msr(env, env->msr | (1ull << MSR_LE));
#endif

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);
}

#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */

/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    target_restore_altstack(&rt_sf->uc.tuc_stack, env);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

/* This syscall implements {get,set,swap}context for userland.  */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = (TaskState *)thread_cpu->opaque;

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -QEMU_ESIGRETURN;
    }

    return 0;
}

void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
    assert(tramp != NULL);

#ifdef TARGET_ARCH_HAS_SETUP_FRAME
    default_sigreturn = sigtramp_page;
    encode_trampoline(TARGET_NR_sigreturn, tramp + 0);
#endif

    default_rt_sigreturn = sigtramp_page + 8;
    encode_trampoline(TARGET_NR_rt_sigreturn, tramp + 2);

    unlock_user(tramp, sigtramp_page, 2 * 8);
}