/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"

/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif

/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];

#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
    /*
     * On ppc64, this mcontext structure is naturally *unaligned*,
     * or rather it is aligned on a 8 bytes boundary but not on
     * a 16 byte boundary.  This pad fixes it up.  This is why we
     * cannot use ppc_avr_t, which would force alignment.  This is
     * also why the vector regs are referenced in the ABI by the
     * v_regs pointer above so any amount of padding can be added here.
     */
    target_ulong pad;
    /* VSCR and VRSAVE are saved separately.  Also reserve space for VSX. */
    struct {
        uint64_t altivec[34 + 16][2];
    } mc_vregs;
#else
    target_ulong mc_pad[2];

    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /*
         * Altivec vector registers.  One extra for VRSAVE.
         * On ppc32, we are already aligned to 16 bytes.  We could
         * use ppc_avr_t, but choose to share the same type as ppc64.
         */
        uint64_t altivec[33][2];
    } mc_vregs;
#endif
};

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};


struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15];  /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc;   /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad

/* See arch/powerpc/kernel/signal.c.  */
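/*
 * Compute the guest address of the signal frame: start from the current
 * stack pointer (or the alternate signal stack, as selected by
 * target_sigsp()), leave room for the frame, and keep 16-byte alignment.
 */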
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}

#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif


static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

#if !defined(TARGET_PPC64)
    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
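    /*
     * 0x38000000 is the base encoding of "addi r0,0,0" (i.e. li r0,0);
     * OR-ing in sigret fills the immediate field with the sigreturn
     * syscall number.  0x44000002 encodes the "sc" instruction.
     */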
    if (sigret) {
        __put_user(0x38000000 | sigret, &tramp[0]);
        __put_user(0x44000002, &tramp[1]);
    }
}

static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        ppc_store_msr(env, ((env->msr & ~(1ull << MSR_LE)) |
                            (msr & (1ull << MSR_LE))));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(env_cpu(env), v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

#if !defined(TARGET_PPC64)
    /* Restore SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif
}

#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack.  */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
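    /* Clearing the MSR_LE bit selects big-endian execution for the handler. */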
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
#if !defined(TARGET_ABI32)
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points. R12 must also be set. */
        env->gpr[12] = env->nip = ka->_sa_handler;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));
#else
    /* Signal handlers are entered in little-endian mode.  */
    ppc_store_msr(env, env->msr | (1ull << MSR_LE));
#endif

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);

}

#if !defined(TARGET_PPC64) || defined(TARGET_ABI32)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

#if defined(TARGET_PPC64)
    set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
#endif
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) || defined(TARGET_ABI32) */

/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    target_restore_altstack(&rt_sf->uc.tuc_stack, env);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

/* This syscall implements {get,set,swap}context for userland.  */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = (TaskState *)thread_cpu->opaque;

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -TARGET_QEMU_ESIGRETURN;
    }

    return 0;
}