/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ?
              SS_ONSTACK : 0);
}

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for(i = 0;i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for(i = 1;i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
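 * (When -TARGET_ERESTARTSYS is returned, the new mask has not been applied;
 * the caller restarts the syscall once the pending signal has been handled.)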
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable.
           Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support the case where POSIX RT signals are used.
 */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}

#ifndef TARGET_UNICORE32
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
static void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
static void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64.
     */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos.
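 * sp is the guest stack pointer at the time of the call: it is used both to
 * report SS_ONSTACK in the old state and to refuse (EPERM) a change while the
 * guest is currently running on the alternate stack.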
 */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.
         */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

#if defined(TARGET_I386)
/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    uint32_t element[4];
};

struct target_fpstate_32 {
    /* Regular FPU environment */
    uint32_t cw;
    uint32_t sw;
    uint32_t tag;
    uint32_t ipoff;
    uint32_t cssel;
    uint32_t dataoff;
    uint32_t datasel;
    struct target_fpreg st[8];
    uint16_t status;
    uint16_t magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    uint32_t _fxsr_env[6];   /* FXSR FPU env is ignored */
    uint32_t mxcsr;
    uint32_t reserved;
    struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg xmm[8];
    uint32_t padding[56];
};

struct target_fpstate_64 {
    /* FXSAVE format */
    uint16_t cw;
    uint16_t sw;
    uint16_t twd;
    uint16_t fop;
    uint64_t rip;
    uint64_t rdp;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint32_t st_space[32];
    uint32_t xmm_space[64];
    uint32_t reserved[24];
};

#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
#else
# define target_fpstate target_fpstate_64
#endif

struct target_sigcontext_32 {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    uint32_t edi;
    uint32_t esi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t ebx;
    uint32_t edx;
    uint32_t ecx;
    uint32_t eax;
    uint32_t trapno;
    uint32_t err;
    uint32_t eip;
    uint16_t cs, __csh;
    uint32_t eflags;
    uint32_t esp_at_signal;
    uint16_t ss, __ssh;
    uint32_t fpstate; /* pointer */
    uint32_t oldmask;
    uint32_t cr2;
};

struct target_sigcontext_64 {
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rdi;
    uint64_t rsi;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rdx;
    uint64_t rax;
    uint64_t rcx;
    uint64_t rsp;
    uint64_t rip;

    uint64_t eflags;

    uint16_t cs;
    uint16_t gs;
    uint16_t fs;
    uint16_t ss;

    uint64_t err;
    uint64_t trapno;
    uint64_t oldmask;
    uint64_t cr2;

    uint64_t fpstate; /* pointer */
    uint64_t padding[8];
};

#ifndef TARGET_X86_64
# define target_sigcontext target_sigcontext_32
#else
# define target_sigcontext target_sigcontext_64
#endif

/* see
   Linux/include/uapi/asm-generic/ucontext.h */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

#ifndef TARGET_X86_64
struct sigframe {
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe {
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

#else

struct rt_sigframe {
    abi_ulong pretcode;
    struct target_ucontext uc;
    struct target_siginfo info;
    struct target_fpstate fpstate;
};

#endif

/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
#ifndef TARGET_X86_64
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions..
     */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
#else
    __put_user(env->regs[R_EDI], &sc->rdi);
    __put_user(env->regs[R_ESI], &sc->rsi);
    __put_user(env->regs[R_EBP], &sc->rbp);
    __put_user(env->regs[R_ESP], &sc->rsp);
    __put_user(env->regs[R_EBX], &sc->rbx);
    __put_user(env->regs[R_EDX], &sc->rdx);
    __put_user(env->regs[R_ECX], &sc->rcx);
    __put_user(env->regs[R_EAX], &sc->rax);

    __put_user(env->regs[8], &sc->r8);
    __put_user(env->regs[9], &sc->r9);
    __put_user(env->regs[10], &sc->r10);
    __put_user(env->regs[11], &sc->r11);
    __put_user(env->regs[12], &sc->r12);
    __put_user(env->regs[13], &sc->r13);
    __put_user(env->regs[14], &sc->r14);
    __put_user(env->regs[15], &sc->r15);

    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->rip);

    __put_user(env->eflags, &sc->eflags);
    __put_user(env->segs[R_CS].selector, &sc->cs);
    __put_user((uint16_t)0, &sc->gs);
    __put_user((uint16_t)0, &sc->fs);
    __put_user(env->segs[R_SS].selector, &sc->ss);

    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);

    /* fpstate_addr must be 16 byte aligned for fxsave */
    assert(!(fpstate_addr & 0xf));

    cpu_x86_fxsave(env, fpstate_addr);
    __put_user(fpstate_addr, &sc->fpstate);
#endif
}

/*
 * Determine which stack to use..
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
#ifdef TARGET_X86_64
    esp -= 128; /* this is the redzone */
#endif

    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {
#ifndef TARGET_X86_64
        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
#endif
    }

#ifndef TARGET_X86_64
    return (esp - frame_size) & -8ul;
#else
    return ((esp - frame_size) & (~15ul)) - 8;
#endif
}

#ifndef TARGET_X86_64
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace. If provided, use a stub
       already in userspace.
     */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
#endif

/* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
#ifndef TARGET_X86_64
    abi_ulong addr;
#endif
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* These fields are only in rt_sigframe on 32 bit */
#ifndef TARGET_X86_64
    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
#endif
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        tswap_siginfo(&frame->info, info);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace. If provided, use a stub
       already in userspace.
     */
#ifndef TARGET_X86_64
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }
#else
    /* XXX: Would be slightly better to return -EFAULT here if test fails
       assert(ka->sa_flags & TARGET_SA_RESTORER); */
    __put_user(ka->sa_restorer, &frame->pretcode);
#endif

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

#ifndef TARGET_X86_64
    env->regs[R_EAX] = sig;
    env->regs[R_EDX] = (unsigned long)&frame->info;
    env->regs[R_ECX] = (unsigned long)&frame->uc;
#else
    env->regs[R_EAX] = 0;
    env->regs[R_EDI] = sig;
    env->regs[R_ESI] = (unsigned long)&frame->info;
    env->regs[R_EDX] = (unsigned long)&frame->uc;
#endif

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}

static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

#ifndef TARGET_X86_64
    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);

    env->eip = tswapl(sc->eip);
#else
    env->regs[8] = tswapl(sc->r8);
    env->regs[9] = tswapl(sc->r9);
    env->regs[10] = tswapl(sc->r10);
    env->regs[11] = tswapl(sc->r11);
    env->regs[12] = tswapl(sc->r12);
    env->regs[13] = tswapl(sc->r13);
    env->regs[14] = tswapl(sc->r14);
    env->regs[15] = tswapl(sc->r15);

    env->regs[R_EDI] = tswapl(sc->rdi);
    env->regs[R_ESI] = tswapl(sc->rsi);
    env->regs[R_EBP] = tswapl(sc->rbp);
    env->regs[R_EBX] = tswapl(sc->rbx);
    env->regs[R_EDX] = tswapl(sc->rdx);
    env->regs[R_EAX] = tswapl(sc->rax);
    env->regs[R_ECX] = tswapl(sc->rcx);
    env->regs[R_ESP] = tswapl(sc->rsp);

    env->eip = tswapl(sc->rip);
#endif

    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
#ifndef TARGET_X86_64
        cpu_x86_frstor(env, fpstate_addr, 1);
#else
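        /* On 64-bit the frame's FP area was written with cpu_x86_fxsave()
         * in setup_sigcontext(), i.e. it is in FXSAVE format, so restore
         * it with the matching fxrstor rather than the legacy frstor used
         * for the 32-bit fsave layout above.
         */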
        cpu_x86_fxrstor(env, fpstate_addr);
#endif
    }

    return err;
badframe:
    return 1;
}

/* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
#ifndef TARGET_X86_64
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif

long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context.
 * User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}

static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}

static abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}

static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}

#elif defined(TARGET_ARM)

struct
target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN       (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN    (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

static const abi_ulong retcodes[4] = {
        SWI_SYS_SIGRETURN,      SWI_THUMB_SIGRETURN,
        SWI_SYS_RT_SIGRETURN,   SWI_THUMB_RT_SIGRETURN
};


static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}

static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}

static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ?
                               ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong*)(iwmmxtframe+1);
}

static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.
*/ 1949 regspace = uc->tuc_regspace; 1950 if (arm_feature(env, ARM_FEATURE_VFP)) { 1951 regspace = setup_sigframe_v2_vfp(regspace, env); 1952 } 1953 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 1954 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 1955 } 1956 1957 /* Write terminating magic word */ 1958 __put_user(0, regspace); 1959 1960 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1961 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 1962 } 1963 } 1964 1965 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 1966 static void setup_frame_v1(int usig, struct target_sigaction *ka, 1967 target_sigset_t *set, CPUARMState *regs) 1968 { 1969 struct sigframe_v1 *frame; 1970 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1971 int i; 1972 1973 trace_user_setup_frame(regs, frame_addr); 1974 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1975 goto sigsegv; 1976 } 1977 1978 setup_sigcontext(&frame->sc, regs, set->sig[0]); 1979 1980 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1981 __put_user(set->sig[i], &frame->extramask[i - 1]); 1982 } 1983 1984 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 1985 frame_addr + offsetof(struct sigframe_v1, retcode)); 1986 1987 unlock_user_struct(frame, frame_addr, 1); 1988 return; 1989 sigsegv: 1990 force_sigsegv(usig); 1991 } 1992 1993 static void setup_frame_v2(int usig, struct target_sigaction *ka, 1994 target_sigset_t *set, CPUARMState *regs) 1995 { 1996 struct sigframe_v2 *frame; 1997 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 1998 1999 trace_user_setup_frame(regs, frame_addr); 2000 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2001 goto sigsegv; 2002 } 2003 2004 setup_sigframe_v2(&frame->uc, set, regs); 2005 2006 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 2007 frame_addr + offsetof(struct sigframe_v2, retcode)); 2008 2009 unlock_user_struct(frame, frame_addr, 1); 2010 return; 2011 sigsegv: 2012 force_sigsegv(usig); 2013 } 2014 2015 static void setup_frame(int usig, struct target_sigaction *ka, 2016 target_sigset_t *set, CPUARMState *regs) 2017 { 2018 if (get_osversion() >= 0x020612) { 2019 setup_frame_v2(usig, ka, set, regs); 2020 } else { 2021 setup_frame_v1(usig, ka, set, regs); 2022 } 2023 } 2024 2025 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 2026 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 2027 target_siginfo_t *info, 2028 target_sigset_t *set, CPUARMState *env) 2029 { 2030 struct rt_sigframe_v1 *frame; 2031 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 2032 struct target_sigaltstack stack; 2033 int i; 2034 abi_ulong info_addr, uc_addr; 2035 2036 trace_user_setup_rt_frame(env, frame_addr); 2037 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2038 goto sigsegv; 2039 } 2040 2041 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 2042 __put_user(info_addr, &frame->pinfo); 2043 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 2044 __put_user(uc_addr, &frame->puc); 2045 tswap_siginfo(&frame->info, info); 2046 2047 /* Clear all the bits of the ucontext we don't use. 
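Only the part of the structure before tuc_mcontext is zeroed; the machine context and the signal mask are filled in explicitly below.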
*/ 2048 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 2049 2050 memset(&stack, 0, sizeof(stack)); 2051 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 2052 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 2053 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 2054 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 2055 2056 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 2057 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 2058 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 2059 } 2060 2061 setup_return(env, ka, &frame->retcode, frame_addr, usig, 2062 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 2063 2064 env->regs[1] = info_addr; 2065 env->regs[2] = uc_addr; 2066 2067 unlock_user_struct(frame, frame_addr, 1); 2068 return; 2069 sigsegv: 2070 force_sigsegv(usig); 2071 } 2072 2073 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 2074 target_siginfo_t *info, 2075 target_sigset_t *set, CPUARMState *env) 2076 { 2077 struct rt_sigframe_v2 *frame; 2078 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 2079 abi_ulong info_addr, uc_addr; 2080 2081 trace_user_setup_rt_frame(env, frame_addr); 2082 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2083 goto sigsegv; 2084 } 2085 2086 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 2087 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 2088 tswap_siginfo(&frame->info, info); 2089 2090 setup_sigframe_v2(&frame->uc, set, env); 2091 2092 setup_return(env, ka, &frame->retcode, frame_addr, usig, 2093 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 2094 2095 env->regs[1] = info_addr; 2096 env->regs[2] = uc_addr; 2097 2098 unlock_user_struct(frame, frame_addr, 1); 2099 return; 2100 sigsegv: 2101 force_sigsegv(usig); 2102 } 2103 2104 static void setup_rt_frame(int usig, struct target_sigaction *ka, 2105 target_siginfo_t *info, 2106 target_sigset_t *set, CPUARMState *env) 2107 { 2108 if (get_osversion() >= 0x020612) { 2109 setup_rt_frame_v2(usig, ka, info, set, env); 2110 } else { 2111 setup_rt_frame_v1(usig, ka, info, set, env); 2112 } 2113 } 2114 2115 static int 2116 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 2117 { 2118 int err = 0; 2119 uint32_t cpsr; 2120 2121 __get_user(env->regs[0], &sc->arm_r0); 2122 __get_user(env->regs[1], &sc->arm_r1); 2123 __get_user(env->regs[2], &sc->arm_r2); 2124 __get_user(env->regs[3], &sc->arm_r3); 2125 __get_user(env->regs[4], &sc->arm_r4); 2126 __get_user(env->regs[5], &sc->arm_r5); 2127 __get_user(env->regs[6], &sc->arm_r6); 2128 __get_user(env->regs[7], &sc->arm_r7); 2129 __get_user(env->regs[8], &sc->arm_r8); 2130 __get_user(env->regs[9], &sc->arm_r9); 2131 __get_user(env->regs[10], &sc->arm_r10); 2132 __get_user(env->regs[11], &sc->arm_fp); 2133 __get_user(env->regs[12], &sc->arm_ip); 2134 __get_user(env->regs[13], &sc->arm_sp); 2135 __get_user(env->regs[14], &sc->arm_lr); 2136 __get_user(env->regs[15], &sc->arm_pc); 2137 #ifdef TARGET_CONFIG_CPU_32 2138 __get_user(cpsr, &sc->arm_cpsr); 2139 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 2140 #endif 2141 2142 err |= !valid_user_regs(env); 2143 2144 return err; 2145 } 2146 2147 static long do_sigreturn_v1(CPUARMState *env) 2148 { 2149 abi_ulong frame_addr; 2150 struct sigframe_v1 *frame = NULL; 2151 target_sigset_t set; 2152 sigset_t host_set; 2153 int i; 2154 2155 /* 2156 * Since we stacked the signal on a 64-bit boundary, 2157 * then 'sp' should be word 
aligned here. If it's 2158 * not, then the user is trying to mess with us. 2159 */ 2160 frame_addr = env->regs[13]; 2161 trace_user_do_sigreturn(env, frame_addr); 2162 if (frame_addr & 7) { 2163 goto badframe; 2164 } 2165 2166 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2167 goto badframe; 2168 } 2169 2170 __get_user(set.sig[0], &frame->sc.oldmask); 2171 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2172 __get_user(set.sig[i], &frame->extramask[i - 1]); 2173 } 2174 2175 target_to_host_sigset_internal(&host_set, &set); 2176 set_sigmask(&host_set); 2177 2178 if (restore_sigcontext(env, &frame->sc)) { 2179 goto badframe; 2180 } 2181 2182 #if 0 2183 /* Send SIGTRAP if we're single-stepping */ 2184 if (ptrace_cancel_bpt(current)) 2185 send_sig(SIGTRAP, current, 1); 2186 #endif 2187 unlock_user_struct(frame, frame_addr, 0); 2188 return -TARGET_QEMU_ESIGRETURN; 2189 2190 badframe: 2191 force_sig(TARGET_SIGSEGV); 2192 return -TARGET_QEMU_ESIGRETURN; 2193 } 2194 2195 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 2196 { 2197 int i; 2198 abi_ulong magic, sz; 2199 uint32_t fpscr, fpexc; 2200 struct target_vfp_sigframe *vfpframe; 2201 vfpframe = (struct target_vfp_sigframe *)regspace; 2202 2203 __get_user(magic, &vfpframe->magic); 2204 __get_user(sz, &vfpframe->size); 2205 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 2206 return 0; 2207 } 2208 for (i = 0; i < 32; i++) { 2209 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 2210 } 2211 __get_user(fpscr, &vfpframe->ufp.fpscr); 2212 vfp_set_fpscr(env, fpscr); 2213 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 2214 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 2215 * and the exception flag is cleared 2216 */ 2217 fpexc |= (1 << 30); 2218 fpexc &= ~((1 << 31) | (1 << 28)); 2219 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2220 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2221 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2222 return (abi_ulong*)(vfpframe + 1); 2223 } 2224 2225 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2226 abi_ulong *regspace) 2227 { 2228 int i; 2229 abi_ulong magic, sz; 2230 struct target_iwmmxt_sigframe *iwmmxtframe; 2231 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2232 2233 __get_user(magic, &iwmmxtframe->magic); 2234 __get_user(sz, &iwmmxtframe->size); 2235 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2236 return 0; 2237 } 2238 for (i = 0; i < 16; i++) { 2239 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2240 } 2241 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2242 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf); 2243 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2244 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2245 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2246 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2247 return (abi_ulong*)(iwmmxtframe + 1); 2248 } 2249 2250 static int do_sigframe_return_v2(CPUARMState *env, 2251 target_ulong context_addr, 2252 struct target_ucontext_v2 *uc) 2253 { 2254 sigset_t host_set; 2255 abi_ulong *regspace; 2256 2257 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2258 set_sigmask(&host_set); 2259 2260 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2261 return 1; 2262 2263 /* Restore coprocessor signal frame */ 2264 regspace = uc->tuc_regspace; 2265 if (arm_feature(env,
ARM_FEATURE_VFP)) { 2266 regspace = restore_sigframe_v2_vfp(env, regspace); 2267 if (!regspace) { 2268 return 1; 2269 } 2270 } 2271 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2272 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2273 if (!regspace) { 2274 return 1; 2275 } 2276 } 2277 2278 if (do_sigaltstack(context_addr 2279 + offsetof(struct target_ucontext_v2, tuc_stack), 2280 0, get_sp_from_cpustate(env)) == -EFAULT) { 2281 return 1; 2282 } 2283 2284 #if 0 2285 /* Send SIGTRAP if we're single-stepping */ 2286 if (ptrace_cancel_bpt(current)) 2287 send_sig(SIGTRAP, current, 1); 2288 #endif 2289 2290 return 0; 2291 } 2292 2293 static long do_sigreturn_v2(CPUARMState *env) 2294 { 2295 abi_ulong frame_addr; 2296 struct sigframe_v2 *frame = NULL; 2297 2298 /* 2299 * Since we stacked the signal on a 64-bit boundary, 2300 * then 'sp' should be word aligned here. If it's 2301 * not, then the user is trying to mess with us. 2302 */ 2303 frame_addr = env->regs[13]; 2304 trace_user_do_sigreturn(env, frame_addr); 2305 if (frame_addr & 7) { 2306 goto badframe; 2307 } 2308 2309 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2310 goto badframe; 2311 } 2312 2313 if (do_sigframe_return_v2(env, 2314 frame_addr 2315 + offsetof(struct sigframe_v2, uc), 2316 &frame->uc)) { 2317 goto badframe; 2318 } 2319 2320 unlock_user_struct(frame, frame_addr, 0); 2321 return -TARGET_QEMU_ESIGRETURN; 2322 2323 badframe: 2324 unlock_user_struct(frame, frame_addr, 0); 2325 force_sig(TARGET_SIGSEGV); 2326 return -TARGET_QEMU_ESIGRETURN; 2327 } 2328 2329 long do_sigreturn(CPUARMState *env) 2330 { 2331 if (get_osversion() >= 0x020612) { 2332 return do_sigreturn_v2(env); 2333 } else { 2334 return do_sigreturn_v1(env); 2335 } 2336 } 2337 2338 static long do_rt_sigreturn_v1(CPUARMState *env) 2339 { 2340 abi_ulong frame_addr; 2341 struct rt_sigframe_v1 *frame = NULL; 2342 sigset_t host_set; 2343 2344 /* 2345 * Since we stacked the signal on a 64-bit boundary, 2346 * then 'sp' should be word aligned here. If it's 2347 * not, then the user is trying to mess with us. 2348 */ 2349 frame_addr = env->regs[13]; 2350 trace_user_do_rt_sigreturn(env, frame_addr); 2351 if (frame_addr & 7) { 2352 goto badframe; 2353 } 2354 2355 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2356 goto badframe; 2357 } 2358 2359 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2360 set_sigmask(&host_set); 2361 2362 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2363 goto badframe; 2364 } 2365 2366 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2367 goto badframe; 2368 2369 #if 0 2370 /* Send SIGTRAP if we're single-stepping */ 2371 if (ptrace_cancel_bpt(current)) 2372 send_sig(SIGTRAP, current, 1); 2373 #endif 2374 unlock_user_struct(frame, frame_addr, 0); 2375 return -TARGET_QEMU_ESIGRETURN; 2376 2377 badframe: 2378 unlock_user_struct(frame, frame_addr, 0); 2379 force_sig(TARGET_SIGSEGV); 2380 return -TARGET_QEMU_ESIGRETURN; 2381 } 2382 2383 static long do_rt_sigreturn_v2(CPUARMState *env) 2384 { 2385 abi_ulong frame_addr; 2386 struct rt_sigframe_v2 *frame = NULL; 2387 2388 /* 2389 * Since we stacked the signal on a 64-bit boundary, 2390 * then 'sp' should be word aligned here. If it's 2391 * not, then the user is trying to mess with us. 
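An unaligned frame address is rejected below before the frame is read.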
2392 */ 2393 frame_addr = env->regs[13]; 2394 trace_user_do_rt_sigreturn(env, frame_addr); 2395 if (frame_addr & 7) { 2396 goto badframe; 2397 } 2398 2399 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2400 goto badframe; 2401 } 2402 2403 if (do_sigframe_return_v2(env, 2404 frame_addr 2405 + offsetof(struct rt_sigframe_v2, uc), 2406 &frame->uc)) { 2407 goto badframe; 2408 } 2409 2410 unlock_user_struct(frame, frame_addr, 0); 2411 return -TARGET_QEMU_ESIGRETURN; 2412 2413 badframe: 2414 unlock_user_struct(frame, frame_addr, 0); 2415 force_sig(TARGET_SIGSEGV); 2416 return -TARGET_QEMU_ESIGRETURN; 2417 } 2418 2419 long do_rt_sigreturn(CPUARMState *env) 2420 { 2421 if (get_osversion() >= 0x020612) { 2422 return do_rt_sigreturn_v2(env); 2423 } else { 2424 return do_rt_sigreturn_v1(env); 2425 } 2426 } 2427 2428 #elif defined(TARGET_SPARC) 2429 2430 #define __SUNOS_MAXWIN 31 2431 2432 /* This is what SunOS does, so shall I. */ 2433 struct target_sigcontext { 2434 abi_ulong sigc_onstack; /* state to restore */ 2435 2436 abi_ulong sigc_mask; /* sigmask to restore */ 2437 abi_ulong sigc_sp; /* stack pointer */ 2438 abi_ulong sigc_pc; /* program counter */ 2439 abi_ulong sigc_npc; /* next program counter */ 2440 abi_ulong sigc_psr; /* for condition codes etc */ 2441 abi_ulong sigc_g1; /* User uses these two registers */ 2442 abi_ulong sigc_o0; /* within the trampoline code. */ 2443 2444 /* Now comes information regarding the users window set 2445 * at the time of the signal. 2446 */ 2447 abi_ulong sigc_oswins; /* outstanding windows */ 2448 2449 /* stack ptrs for each regwin buf */ 2450 char *sigc_spbuf[__SUNOS_MAXWIN]; 2451 2452 /* Windows to restore after signal */ 2453 struct { 2454 abi_ulong locals[8]; 2455 abi_ulong ins[8]; 2456 } sigc_wbuf[__SUNOS_MAXWIN]; 2457 }; 2458 /* A Sparc stack frame */ 2459 struct sparc_stackf { 2460 abi_ulong locals[8]; 2461 abi_ulong ins[8]; 2462 /* It's simpler to treat fp and callers_pc as elements of ins[] 2463 * since we never need to access them ourselves. 
2464 */ 2465 char *structptr; 2466 abi_ulong xargs[6]; 2467 abi_ulong xxargs[1]; 2468 }; 2469 2470 typedef struct { 2471 struct { 2472 abi_ulong psr; 2473 abi_ulong pc; 2474 abi_ulong npc; 2475 abi_ulong y; 2476 abi_ulong u_regs[16]; /* globals and ins */ 2477 } si_regs; 2478 int si_mask; 2479 } __siginfo_t; 2480 2481 typedef struct { 2482 abi_ulong si_float_regs[32]; 2483 unsigned long si_fsr; 2484 unsigned long si_fpqdepth; 2485 struct { 2486 unsigned long *insn_addr; 2487 unsigned long insn; 2488 } si_fpqueue [16]; 2489 } qemu_siginfo_fpu_t; 2490 2491 2492 struct target_signal_frame { 2493 struct sparc_stackf ss; 2494 __siginfo_t info; 2495 abi_ulong fpu_save; 2496 abi_ulong insns[2] __attribute__ ((aligned (8))); 2497 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2498 abi_ulong extra_size; /* Should be 0 */ 2499 qemu_siginfo_fpu_t fpu_state; 2500 }; 2501 struct target_rt_signal_frame { 2502 struct sparc_stackf ss; 2503 siginfo_t info; 2504 abi_ulong regs[20]; 2505 sigset_t mask; 2506 abi_ulong fpu_save; 2507 unsigned int insns[2]; 2508 stack_t stack; 2509 unsigned int extra_size; /* Should be 0 */ 2510 qemu_siginfo_fpu_t fpu_state; 2511 }; 2512 2513 #define UREG_O0 16 2514 #define UREG_O6 22 2515 #define UREG_I0 0 2516 #define UREG_I1 1 2517 #define UREG_I2 2 2518 #define UREG_I3 3 2519 #define UREG_I4 4 2520 #define UREG_I5 5 2521 #define UREG_I6 6 2522 #define UREG_I7 7 2523 #define UREG_L0 8 2524 #define UREG_FP UREG_I6 2525 #define UREG_SP UREG_O6 2526 2527 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2528 CPUSPARCState *env, 2529 unsigned long framesize) 2530 { 2531 abi_ulong sp; 2532 2533 sp = env->regwptr[UREG_FP]; 2534 2535 /* This is the X/Open sanctioned signal stack switching. */ 2536 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2537 if (!on_sig_stack(sp) 2538 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2539 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2540 } 2541 } 2542 return sp - framesize; 2543 } 2544 2545 static int 2546 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2547 { 2548 int err = 0, i; 2549 2550 __put_user(env->psr, &si->si_regs.psr); 2551 __put_user(env->pc, &si->si_regs.pc); 2552 __put_user(env->npc, &si->si_regs.npc); 2553 __put_user(env->y, &si->si_regs.y); 2554 for (i=0; i < 8; i++) { 2555 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2556 } 2557 for (i=0; i < 8; i++) { 2558 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2559 } 2560 __put_user(mask, &si->si_mask); 2561 return err; 2562 } 2563 2564 #if 0 2565 static int 2566 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2567 CPUSPARCState *env, unsigned long mask) 2568 { 2569 int err = 0; 2570 2571 __put_user(mask, &sc->sigc_mask); 2572 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2573 __put_user(env->pc, &sc->sigc_pc); 2574 __put_user(env->npc, &sc->sigc_npc); 2575 __put_user(env->psr, &sc->sigc_psr); 2576 __put_user(env->gregs[1], &sc->sigc_g1); 2577 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2578 2579 return err; 2580 } 2581 #endif 2582 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2583 2584 static void setup_frame(int sig, struct target_sigaction *ka, 2585 target_sigset_t *set, CPUSPARCState *env) 2586 { 2587 abi_ulong sf_addr; 2588 struct target_signal_frame *sf; 2589 int sigframe_size, err, i; 2590 2591 /* 1. 
Make sure everything is clean */ 2592 //synchronize_user_stack(); 2593 2594 sigframe_size = NF_ALIGNEDSZ; 2595 sf_addr = get_sigframe(ka, env, sigframe_size); 2596 trace_user_setup_frame(env, sf_addr); 2597 2598 sf = lock_user(VERIFY_WRITE, sf_addr, 2599 sizeof(struct target_signal_frame), 0); 2600 if (!sf) { 2601 goto sigsegv; 2602 } 2603 #if 0 2604 if (invalid_frame_pointer(sf, sigframe_size)) 2605 goto sigill_and_return; 2606 #endif 2607 /* 2. Save the current process state */ 2608 err = setup___siginfo(&sf->info, env, set->sig[0]); 2609 __put_user(0, &sf->extra_size); 2610 2611 //save_fpu_state(regs, &sf->fpu_state); 2612 //__put_user(&sf->fpu_state, &sf->fpu_save); 2613 2614 __put_user(set->sig[0], &sf->info.si_mask); 2615 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2616 __put_user(set->sig[i + 1], &sf->extramask[i]); 2617 } 2618 2619 for (i = 0; i < 8; i++) { 2620 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2621 } 2622 for (i = 0; i < 8; i++) { 2623 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2624 } 2625 if (err) 2626 goto sigsegv; 2627 2628 /* 3. signal handler back-trampoline and parameters */ 2629 env->regwptr[UREG_FP] = sf_addr; 2630 env->regwptr[UREG_I0] = sig; 2631 env->regwptr[UREG_I1] = sf_addr + 2632 offsetof(struct target_signal_frame, info); 2633 env->regwptr[UREG_I2] = sf_addr + 2634 offsetof(struct target_signal_frame, info); 2635 2636 /* 4. signal handler */ 2637 env->pc = ka->_sa_handler; 2638 env->npc = (env->pc + 4); 2639 /* 5. return to kernel instructions */ 2640 if (ka->sa_restorer) { 2641 env->regwptr[UREG_I7] = ka->sa_restorer; 2642 } else { 2643 uint32_t val32; 2644 2645 env->regwptr[UREG_I7] = sf_addr + 2646 offsetof(struct target_signal_frame, insns) - 2 * 4; 2647 2648 /* mov __NR_sigreturn, %g1 */ 2649 val32 = 0x821020d8; 2650 __put_user(val32, &sf->insns[0]); 2651 2652 /* t 0x10 */ 2653 val32 = 0x91d02010; 2654 __put_user(val32, &sf->insns[1]); 2655 if (err) 2656 goto sigsegv; 2657 2658 /* Flush instruction space. */ 2659 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2660 // tb_flush(env); 2661 } 2662 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2663 return; 2664 #if 0 2665 sigill_and_return: 2666 force_sig(TARGET_SIGILL); 2667 #endif 2668 sigsegv: 2669 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2670 force_sigsegv(sig); 2671 } 2672 2673 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2674 target_siginfo_t *info, 2675 target_sigset_t *set, CPUSPARCState *env) 2676 { 2677 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2678 } 2679 2680 long do_sigreturn(CPUSPARCState *env) 2681 { 2682 abi_ulong sf_addr; 2683 struct target_signal_frame *sf; 2684 uint32_t up_psr, pc, npc; 2685 target_sigset_t set; 2686 sigset_t host_set; 2687 int err=0, i; 2688 2689 sf_addr = env->regwptr[UREG_FP]; 2690 trace_user_do_sigreturn(env, sf_addr); 2691 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2692 goto segv_and_exit; 2693 } 2694 2695 /* 1. Make sure we are not getting garbage from the user */ 2696 2697 if (sf_addr & 3) 2698 goto segv_and_exit; 2699 2700 __get_user(pc, &sf->info.si_regs.pc); 2701 __get_user(npc, &sf->info.si_regs.npc); 2702 2703 if ((pc | npc) & 3) { 2704 goto segv_and_exit; 2705 } 2706 2707 /* 2. Restore the state */ 2708 __get_user(up_psr, &sf->info.si_regs.psr); 2709 2710 /* User can only change condition codes and FPU enabling in %psr. 
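Only the ICC bits are taken from the saved value here; the PSR_EF part is left commented out because FPU state save/restore is not implemented yet (see the FIXME below).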
*/ 2711 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2712 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2713 2714 env->pc = pc; 2715 env->npc = npc; 2716 __get_user(env->y, &sf->info.si_regs.y); 2717 for (i=0; i < 8; i++) { 2718 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2719 } 2720 for (i=0; i < 8; i++) { 2721 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2722 } 2723 2724 /* FIXME: implement FPU save/restore: 2725 * __get_user(fpu_save, &sf->fpu_save); 2726 * if (fpu_save) 2727 * err |= restore_fpu_state(env, fpu_save); 2728 */ 2729 2730 /* This is pretty much atomic, no amount locking would prevent 2731 * the races which exist anyways. 2732 */ 2733 __get_user(set.sig[0], &sf->info.si_mask); 2734 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2735 __get_user(set.sig[i], &sf->extramask[i - 1]); 2736 } 2737 2738 target_to_host_sigset_internal(&host_set, &set); 2739 set_sigmask(&host_set); 2740 2741 if (err) { 2742 goto segv_and_exit; 2743 } 2744 unlock_user_struct(sf, sf_addr, 0); 2745 return -TARGET_QEMU_ESIGRETURN; 2746 2747 segv_and_exit: 2748 unlock_user_struct(sf, sf_addr, 0); 2749 force_sig(TARGET_SIGSEGV); 2750 return -TARGET_QEMU_ESIGRETURN; 2751 } 2752 2753 long do_rt_sigreturn(CPUSPARCState *env) 2754 { 2755 trace_user_do_rt_sigreturn(env, 0); 2756 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2757 return -TARGET_ENOSYS; 2758 } 2759 2760 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2761 #define MC_TSTATE 0 2762 #define MC_PC 1 2763 #define MC_NPC 2 2764 #define MC_Y 3 2765 #define MC_G1 4 2766 #define MC_G2 5 2767 #define MC_G3 6 2768 #define MC_G4 7 2769 #define MC_G5 8 2770 #define MC_G6 9 2771 #define MC_G7 10 2772 #define MC_O0 11 2773 #define MC_O1 12 2774 #define MC_O2 13 2775 #define MC_O3 14 2776 #define MC_O4 15 2777 #define MC_O5 16 2778 #define MC_O6 17 2779 #define MC_O7 18 2780 #define MC_NGREG 19 2781 2782 typedef abi_ulong target_mc_greg_t; 2783 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2784 2785 struct target_mc_fq { 2786 abi_ulong *mcfq_addr; 2787 uint32_t mcfq_insn; 2788 }; 2789 2790 struct target_mc_fpu { 2791 union { 2792 uint32_t sregs[32]; 2793 uint64_t dregs[32]; 2794 //uint128_t qregs[16]; 2795 } mcfpu_fregs; 2796 abi_ulong mcfpu_fsr; 2797 abi_ulong mcfpu_fprs; 2798 abi_ulong mcfpu_gsr; 2799 struct target_mc_fq *mcfpu_fq; 2800 unsigned char mcfpu_qcnt; 2801 unsigned char mcfpu_qentsz; 2802 unsigned char mcfpu_enab; 2803 }; 2804 typedef struct target_mc_fpu target_mc_fpu_t; 2805 2806 typedef struct { 2807 target_mc_gregset_t mc_gregs; 2808 target_mc_greg_t mc_fp; 2809 target_mc_greg_t mc_i7; 2810 target_mc_fpu_t mc_fpregs; 2811 } target_mcontext_t; 2812 2813 struct target_ucontext { 2814 struct target_ucontext *tuc_link; 2815 abi_ulong tuc_flags; 2816 target_sigset_t tuc_sigmask; 2817 target_mcontext_t tuc_mcontext; 2818 }; 2819 2820 /* A V9 register window */ 2821 struct target_reg_window { 2822 abi_ulong locals[8]; 2823 abi_ulong ins[8]; 2824 }; 2825 2826 #define TARGET_STACK_BIAS 2047 2827 2828 /* {set, get}context() needed for 64-bit SparcLinux userland. 
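Both take the guest address of a target_ucontext from regwptr[UREG_I0]; sparc64_set_context() reloads the CPU state from it, while sparc64_get_context() stores the current state into it.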
*/ 2829 void sparc64_set_context(CPUSPARCState *env) 2830 { 2831 abi_ulong ucp_addr; 2832 struct target_ucontext *ucp; 2833 target_mc_gregset_t *grp; 2834 abi_ulong pc, npc, tstate; 2835 abi_ulong fp, i7, w_addr; 2836 unsigned int i; 2837 2838 ucp_addr = env->regwptr[UREG_I0]; 2839 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2840 goto do_sigsegv; 2841 } 2842 grp = &ucp->tuc_mcontext.mc_gregs; 2843 __get_user(pc, &((*grp)[MC_PC])); 2844 __get_user(npc, &((*grp)[MC_NPC])); 2845 if ((pc | npc) & 3) { 2846 goto do_sigsegv; 2847 } 2848 if (env->regwptr[UREG_I1]) { 2849 target_sigset_t target_set; 2850 sigset_t set; 2851 2852 if (TARGET_NSIG_WORDS == 1) { 2853 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2854 } else { 2855 abi_ulong *src, *dst; 2856 src = ucp->tuc_sigmask.sig; 2857 dst = target_set.sig; 2858 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2859 __get_user(*dst, src); 2860 } 2861 } 2862 target_to_host_sigset_internal(&set, &target_set); 2863 set_sigmask(&set); 2864 } 2865 env->pc = pc; 2866 env->npc = npc; 2867 __get_user(env->y, &((*grp)[MC_Y])); 2868 __get_user(tstate, &((*grp)[MC_TSTATE])); 2869 env->asi = (tstate >> 24) & 0xff; 2870 cpu_put_ccr(env, tstate >> 32); 2871 cpu_put_cwp64(env, tstate & 0x1f); 2872 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2873 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2874 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2875 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2876 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2877 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2878 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2879 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2880 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2881 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2882 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2883 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2884 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2885 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2886 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2887 2888 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2889 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2890 2891 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2892 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2893 abi_ulong) != 0) { 2894 goto do_sigsegv; 2895 } 2896 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2897 abi_ulong) != 0) { 2898 goto do_sigsegv; 2899 } 2900 /* FIXME this does not match how the kernel handles the FPU in 2901 * its sparc64_set_context implementation. 
In particular the FPU 2902 * is only restored if fenab is non-zero in: 2903 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2904 */ 2905 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2906 { 2907 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2908 for (i = 0; i < 64; i++, src++) { 2909 if (i & 1) { 2910 __get_user(env->fpr[i/2].l.lower, src); 2911 } else { 2912 __get_user(env->fpr[i/2].l.upper, src); 2913 } 2914 } 2915 } 2916 __get_user(env->fsr, 2917 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2918 __get_user(env->gsr, 2919 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2920 unlock_user_struct(ucp, ucp_addr, 0); 2921 return; 2922 do_sigsegv: 2923 unlock_user_struct(ucp, ucp_addr, 0); 2924 force_sig(TARGET_SIGSEGV); 2925 } 2926 2927 void sparc64_get_context(CPUSPARCState *env) 2928 { 2929 abi_ulong ucp_addr; 2930 struct target_ucontext *ucp; 2931 target_mc_gregset_t *grp; 2932 target_mcontext_t *mcp; 2933 abi_ulong fp, i7, w_addr; 2934 int err; 2935 unsigned int i; 2936 target_sigset_t target_set; 2937 sigset_t set; 2938 2939 ucp_addr = env->regwptr[UREG_I0]; 2940 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2941 goto do_sigsegv; 2942 } 2943 2944 mcp = &ucp->tuc_mcontext; 2945 grp = &mcp->mc_gregs; 2946 2947 /* Skip over the trap instruction, first. */ 2948 env->pc = env->npc; 2949 env->npc += 4; 2950 2951 /* If we're only reading the signal mask then do_sigprocmask() 2952 * is guaranteed not to fail, which is important because we don't 2953 * have any way to signal a failure or restart this operation since 2954 * this is not a normal syscall. 2955 */ 2956 err = do_sigprocmask(0, NULL, &set); 2957 assert(err == 0); 2958 host_to_target_sigset_internal(&target_set, &set); 2959 if (TARGET_NSIG_WORDS == 1) { 2960 __put_user(target_set.sig[0], 2961 (abi_ulong *)&ucp->tuc_sigmask); 2962 } else { 2963 abi_ulong *src, *dst; 2964 src = target_set.sig; 2965 dst = ucp->tuc_sigmask.sig; 2966 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2967 __put_user(*src, dst); 2968 } 2969 if (err) 2970 goto do_sigsegv; 2971 } 2972 2973 /* XXX: tstate must be saved properly */ 2974 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2975 __put_user(env->pc, &((*grp)[MC_PC])); 2976 __put_user(env->npc, &((*grp)[MC_NPC])); 2977 __put_user(env->y, &((*grp)[MC_Y])); 2978 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2979 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2980 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2981 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2982 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2983 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2984 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2985 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2986 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2987 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2988 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2989 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2990 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2991 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2992 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2993 2994 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2995 fp = i7 = 0; 2996 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2997 abi_ulong) != 0) { 2998 goto do_sigsegv; 2999 } 3000 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 3001 abi_ulong) != 0) { 3002 goto do_sigsegv; 3003 } 3004 __put_user(fp, &(mcp->mc_fp)); 3005 __put_user(i7, 
&(mcp->mc_i7)); 3006 3007 { 3008 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 3009 for (i = 0; i < 64; i++, dst++) { 3010 if (i & 1) { 3011 __put_user(env->fpr[i/2].l.lower, dst); 3012 } else { 3013 __put_user(env->fpr[i/2].l.upper, dst); 3014 } 3015 } 3016 } 3017 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 3018 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 3019 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 3020 3021 if (err) 3022 goto do_sigsegv; 3023 unlock_user_struct(ucp, ucp_addr, 1); 3024 return; 3025 do_sigsegv: 3026 unlock_user_struct(ucp, ucp_addr, 1); 3027 force_sig(TARGET_SIGSEGV); 3028 } 3029 #endif 3030 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 3031 3032 # if defined(TARGET_ABI_MIPSO32) 3033 struct target_sigcontext { 3034 uint32_t sc_regmask; /* Unused */ 3035 uint32_t sc_status; 3036 uint64_t sc_pc; 3037 uint64_t sc_regs[32]; 3038 uint64_t sc_fpregs[32]; 3039 uint32_t sc_ownedfp; /* Unused */ 3040 uint32_t sc_fpc_csr; 3041 uint32_t sc_fpc_eir; /* Unused */ 3042 uint32_t sc_used_math; 3043 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 3044 uint32_t pad0; 3045 uint64_t sc_mdhi; 3046 uint64_t sc_mdlo; 3047 target_ulong sc_hi1; /* Was sc_cause */ 3048 target_ulong sc_lo1; /* Was sc_badvaddr */ 3049 target_ulong sc_hi2; /* Was sc_sigset[4] */ 3050 target_ulong sc_lo2; 3051 target_ulong sc_hi3; 3052 target_ulong sc_lo3; 3053 }; 3054 # else /* N32 || N64 */ 3055 struct target_sigcontext { 3056 uint64_t sc_regs[32]; 3057 uint64_t sc_fpregs[32]; 3058 uint64_t sc_mdhi; 3059 uint64_t sc_hi1; 3060 uint64_t sc_hi2; 3061 uint64_t sc_hi3; 3062 uint64_t sc_mdlo; 3063 uint64_t sc_lo1; 3064 uint64_t sc_lo2; 3065 uint64_t sc_lo3; 3066 uint64_t sc_pc; 3067 uint32_t sc_fpc_csr; 3068 uint32_t sc_used_math; 3069 uint32_t sc_dsp; 3070 uint32_t sc_reserved; 3071 }; 3072 # endif /* O32 */ 3073 3074 struct sigframe { 3075 uint32_t sf_ass[4]; /* argument save space for o32 */ 3076 uint32_t sf_code[2]; /* signal trampoline */ 3077 struct target_sigcontext sf_sc; 3078 target_sigset_t sf_mask; 3079 }; 3080 3081 struct target_ucontext { 3082 target_ulong tuc_flags; 3083 target_ulong tuc_link; 3084 target_stack_t tuc_stack; 3085 target_ulong pad0; 3086 struct target_sigcontext tuc_mcontext; 3087 target_sigset_t tuc_sigmask; 3088 }; 3089 3090 struct target_rt_sigframe { 3091 uint32_t rs_ass[4]; /* argument save space for o32 */ 3092 uint32_t rs_code[2]; /* signal trampoline */ 3093 struct target_siginfo rs_info; 3094 struct target_ucontext rs_uc; 3095 }; 3096 3097 /* Install trampoline to jump back from signal handler */ 3098 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 3099 { 3100 int err = 0; 3101 3102 /* 3103 * Set up the return code ... 3104 * 3105 * li v0, __NR__foo_sigreturn 3106 * syscall 3107 */ 3108 3109 __put_user(0x24020000 + syscall, tramp + 0); 3110 __put_user(0x0000000c , tramp + 1); 3111 return err; 3112 } 3113 3114 static inline void setup_sigcontext(CPUMIPSState *regs, 3115 struct target_sigcontext *sc) 3116 { 3117 int i; 3118 3119 __put_user(exception_resume_pc(regs), &sc->sc_pc); 3120 regs->hflags &= ~MIPS_HFLAG_BMASK; 3121 3122 __put_user(0, &sc->sc_regs[0]); 3123 for (i = 1; i < 32; ++i) { 3124 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 3125 } 3126 3127 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 3128 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 3129 3130 /* Rather than checking for dsp existence, always copy. The storage 3131 would just be garbage otherwise. 
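This covers the extra HI/LO pairs and the DSP control value read with cpu_rddsp().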
*/ 3132 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 3133 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 3134 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 3135 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 3136 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 3137 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 3138 { 3139 uint32_t dsp = cpu_rddsp(0x3ff, regs); 3140 __put_user(dsp, &sc->sc_dsp); 3141 } 3142 3143 __put_user(1, &sc->sc_used_math); 3144 3145 for (i = 0; i < 32; ++i) { 3146 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 3147 } 3148 } 3149 3150 static inline void 3151 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 3152 { 3153 int i; 3154 3155 __get_user(regs->CP0_EPC, &sc->sc_pc); 3156 3157 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 3158 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 3159 3160 for (i = 1; i < 32; ++i) { 3161 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 3162 } 3163 3164 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 3165 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 3166 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 3167 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 3168 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 3169 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 3170 { 3171 uint32_t dsp; 3172 __get_user(dsp, &sc->sc_dsp); 3173 cpu_wrdsp(dsp, 0x3ff, regs); 3174 } 3175 3176 for (i = 0; i < 32; ++i) { 3177 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 3178 } 3179 } 3180 3181 /* 3182 * Determine which stack to use.. 3183 */ 3184 static inline abi_ulong 3185 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 3186 { 3187 unsigned long sp; 3188 3189 /* Default to using normal stack */ 3190 sp = regs->active_tc.gpr[29]; 3191 3192 /* 3193 * FPU emulator may have its own trampoline active just 3194 * above the user stack, 16-bytes before the next lowest 3195 * 16 byte boundary. Try to avoid trashing it. 3196 */ 3197 sp -= 32; 3198 3199 /* This is the X/Open sanctioned signal stack switching. */ 3200 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 3201 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3202 } 3203 3204 return (sp - frame_size) & ~7; 3205 } 3206 3207 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3208 { 3209 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3210 env->hflags &= ~MIPS_HFLAG_M16; 3211 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3212 env->active_tc.PC &= ~(target_ulong) 1; 3213 } 3214 } 3215 3216 # if defined(TARGET_ABI_MIPSO32) 3217 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3218 static void setup_frame(int sig, struct target_sigaction * ka, 3219 target_sigset_t *set, CPUMIPSState *regs) 3220 { 3221 struct sigframe *frame; 3222 abi_ulong frame_addr; 3223 int i; 3224 3225 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3226 trace_user_setup_frame(regs, frame_addr); 3227 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3228 goto give_sigsegv; 3229 } 3230 3231 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3232 3233 setup_sigcontext(regs, &frame->sf_sc); 3234 3235 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3236 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3237 } 3238 3239 /* 3240 * Arguments to signal handler: 3241 * 3242 * a0 = signal number 3243 * a1 = 0 (should be cause) 3244 * a2 = pointer to struct sigcontext 3245 * 3246 * $25 and PC point to the signal handler, $29 points to the 3247 * struct sigframe. 
3248 */ 3249 regs->active_tc.gpr[ 4] = sig; 3250 regs->active_tc.gpr[ 5] = 0; 3251 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3252 regs->active_tc.gpr[29] = frame_addr; 3253 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3254 /* The original kernel code sets CP0_EPC to the handler 3255 * since it returns to userland using eret 3256 * we cannot do this here, and we must set PC directly */ 3257 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3258 mips_set_hflags_isa_mode_from_pc(regs); 3259 unlock_user_struct(frame, frame_addr, 1); 3260 return; 3261 3262 give_sigsegv: 3263 force_sigsegv(sig); 3264 } 3265 3266 long do_sigreturn(CPUMIPSState *regs) 3267 { 3268 struct sigframe *frame; 3269 abi_ulong frame_addr; 3270 sigset_t blocked; 3271 target_sigset_t target_set; 3272 int i; 3273 3274 frame_addr = regs->active_tc.gpr[29]; 3275 trace_user_do_sigreturn(regs, frame_addr); 3276 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3277 goto badframe; 3278 3279 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3280 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3281 } 3282 3283 target_to_host_sigset_internal(&blocked, &target_set); 3284 set_sigmask(&blocked); 3285 3286 restore_sigcontext(regs, &frame->sf_sc); 3287 3288 #if 0 3289 /* 3290 * Don't let your children do this ... 3291 */ 3292 __asm__ __volatile__( 3293 "move\t$29, %0\n\t" 3294 "j\tsyscall_exit" 3295 :/* no outputs */ 3296 :"r" (®s)); 3297 /* Unreached */ 3298 #endif 3299 3300 regs->active_tc.PC = regs->CP0_EPC; 3301 mips_set_hflags_isa_mode_from_pc(regs); 3302 /* I am not sure this is right, but it seems to work 3303 * maybe a problem with nested signals ? */ 3304 regs->CP0_EPC = 0; 3305 return -TARGET_QEMU_ESIGRETURN; 3306 3307 badframe: 3308 force_sig(TARGET_SIGSEGV); 3309 return -TARGET_QEMU_ESIGRETURN; 3310 } 3311 # endif /* O32 */ 3312 3313 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3314 target_siginfo_t *info, 3315 target_sigset_t *set, CPUMIPSState *env) 3316 { 3317 struct target_rt_sigframe *frame; 3318 abi_ulong frame_addr; 3319 int i; 3320 3321 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3322 trace_user_setup_rt_frame(env, frame_addr); 3323 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3324 goto give_sigsegv; 3325 } 3326 3327 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3328 3329 tswap_siginfo(&frame->rs_info, info); 3330 3331 __put_user(0, &frame->rs_uc.tuc_flags); 3332 __put_user(0, &frame->rs_uc.tuc_link); 3333 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3334 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3335 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3336 &frame->rs_uc.tuc_stack.ss_flags); 3337 3338 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3339 3340 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3341 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3342 } 3343 3344 /* 3345 * Arguments to signal handler: 3346 * 3347 * a0 = signal number 3348 * a1 = pointer to siginfo_t 3349 * a2 = pointer to ucontext_t 3350 * 3351 * $25 and PC point to the signal handler, $29 points to the 3352 * struct sigframe. 
3353 */ 3354 env->active_tc.gpr[ 4] = sig; 3355 env->active_tc.gpr[ 5] = frame_addr 3356 + offsetof(struct target_rt_sigframe, rs_info); 3357 env->active_tc.gpr[ 6] = frame_addr 3358 + offsetof(struct target_rt_sigframe, rs_uc); 3359 env->active_tc.gpr[29] = frame_addr; 3360 env->active_tc.gpr[31] = frame_addr 3361 + offsetof(struct target_rt_sigframe, rs_code); 3362 /* The original kernel code sets CP0_EPC to the handler 3363 * since it returns to userland using eret 3364 * we cannot do this here, and we must set PC directly */ 3365 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3366 mips_set_hflags_isa_mode_from_pc(env); 3367 unlock_user_struct(frame, frame_addr, 1); 3368 return; 3369 3370 give_sigsegv: 3371 unlock_user_struct(frame, frame_addr, 1); 3372 force_sigsegv(sig); 3373 } 3374 3375 long do_rt_sigreturn(CPUMIPSState *env) 3376 { 3377 struct target_rt_sigframe *frame; 3378 abi_ulong frame_addr; 3379 sigset_t blocked; 3380 3381 frame_addr = env->active_tc.gpr[29]; 3382 trace_user_do_rt_sigreturn(env, frame_addr); 3383 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3384 goto badframe; 3385 } 3386 3387 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3388 set_sigmask(&blocked); 3389 3390 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3391 3392 if (do_sigaltstack(frame_addr + 3393 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3394 0, get_sp_from_cpustate(env)) == -EFAULT) 3395 goto badframe; 3396 3397 env->active_tc.PC = env->CP0_EPC; 3398 mips_set_hflags_isa_mode_from_pc(env); 3399 /* I am not sure this is right, but it seems to work 3400 * maybe a problem with nested signals ? */ 3401 env->CP0_EPC = 0; 3402 return -TARGET_QEMU_ESIGRETURN; 3403 3404 badframe: 3405 force_sig(TARGET_SIGSEGV); 3406 return -TARGET_QEMU_ESIGRETURN; 3407 } 3408 3409 #elif defined(TARGET_SH4) 3410 3411 /* 3412 * code and data structures from linux kernel: 3413 * include/asm-sh/sigcontext.h 3414 * arch/sh/kernel/signal.c 3415 */ 3416 3417 struct target_sigcontext { 3418 target_ulong oldmask; 3419 3420 /* CPU registers */ 3421 target_ulong sc_gregs[16]; 3422 target_ulong sc_pc; 3423 target_ulong sc_pr; 3424 target_ulong sc_sr; 3425 target_ulong sc_gbr; 3426 target_ulong sc_mach; 3427 target_ulong sc_macl; 3428 3429 /* FPU registers */ 3430 target_ulong sc_fpregs[16]; 3431 target_ulong sc_xfpregs[16]; 3432 unsigned int sc_fpscr; 3433 unsigned int sc_fpul; 3434 unsigned int sc_ownedfp; 3435 }; 3436 3437 struct target_sigframe 3438 { 3439 struct target_sigcontext sc; 3440 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3441 uint16_t retcode[3]; 3442 }; 3443 3444 3445 struct target_ucontext { 3446 target_ulong tuc_flags; 3447 struct target_ucontext *tuc_link; 3448 target_stack_t tuc_stack; 3449 struct target_sigcontext tuc_mcontext; 3450 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3451 }; 3452 3453 struct target_rt_sigframe 3454 { 3455 struct target_siginfo info; 3456 struct target_ucontext uc; 3457 uint16_t retcode[3]; 3458 }; 3459 3460 3461 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3462 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3463 3464 static abi_ulong get_sigframe(struct target_sigaction *ka, 3465 unsigned long sp, size_t frame_size) 3466 { 3467 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3468 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3469 } 3470 3471 return (sp - frame_size) & -8ul; 3472 } 3473 3474 /* Notice when we're in the middle of a 
gUSA region and reset. 3475 Note that this will only occur for !parallel_cpus, as we will 3476 translate such sequences differently in a parallel context. */ 3477 static void unwind_gusa(CPUSH4State *regs) 3478 { 3479 /* If the stack pointer is sufficiently negative, and we haven't 3480 completed the sequence, then reset to the entry to the region. */ 3481 /* ??? The SH4 kernel checks for an address above 0xC0000000. 3482 However, the page mappings in qemu linux-user aren't as restricted 3483 and we wind up with the normal stack mapped above 0xF0000000. 3484 That said, there is no reason why the kernel should be allowing 3485 a gUSA region that spans 1GB. Use a tighter check here, for what 3486 can actually be enabled by the immediate move. */ 3487 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) { 3488 /* Reset the PC to before the gUSA region, as computed from 3489 R0 = region end, SP = -(region size), plus one more for the 3490 insn that actually initializes SP to the region size. */ 3491 regs->pc = regs->gregs[0] + regs->gregs[15] - 2; 3492 3493 /* Reset the SP to the saved version in R1. */ 3494 regs->gregs[15] = regs->gregs[1]; 3495 } 3496 } 3497 3498 static void setup_sigcontext(struct target_sigcontext *sc, 3499 CPUSH4State *regs, unsigned long mask) 3500 { 3501 int i; 3502 3503 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3504 COPY(gregs[0]); COPY(gregs[1]); 3505 COPY(gregs[2]); COPY(gregs[3]); 3506 COPY(gregs[4]); COPY(gregs[5]); 3507 COPY(gregs[6]); COPY(gregs[7]); 3508 COPY(gregs[8]); COPY(gregs[9]); 3509 COPY(gregs[10]); COPY(gregs[11]); 3510 COPY(gregs[12]); COPY(gregs[13]); 3511 COPY(gregs[14]); COPY(gregs[15]); 3512 COPY(gbr); COPY(mach); 3513 COPY(macl); COPY(pr); 3514 COPY(sr); COPY(pc); 3515 #undef COPY 3516 3517 for (i=0; i<16; i++) { 3518 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3519 } 3520 __put_user(regs->fpscr, &sc->sc_fpscr); 3521 __put_user(regs->fpul, &sc->sc_fpul); 3522 3523 /* non-iBCS2 extensions..
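The blocked-signal mask goes into the sigcontext's oldmask field.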
*/ 3524 __put_user(mask, &sc->oldmask); 3525 } 3526 3527 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3528 { 3529 int i; 3530 3531 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3532 COPY(gregs[0]); COPY(gregs[1]); 3533 COPY(gregs[2]); COPY(gregs[3]); 3534 COPY(gregs[4]); COPY(gregs[5]); 3535 COPY(gregs[6]); COPY(gregs[7]); 3536 COPY(gregs[8]); COPY(gregs[9]); 3537 COPY(gregs[10]); COPY(gregs[11]); 3538 COPY(gregs[12]); COPY(gregs[13]); 3539 COPY(gregs[14]); COPY(gregs[15]); 3540 COPY(gbr); COPY(mach); 3541 COPY(macl); COPY(pr); 3542 COPY(sr); COPY(pc); 3543 #undef COPY 3544 3545 for (i=0; i<16; i++) { 3546 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3547 } 3548 __get_user(regs->fpscr, &sc->sc_fpscr); 3549 __get_user(regs->fpul, &sc->sc_fpul); 3550 3551 regs->tra = -1; /* disable syscall checks */ 3552 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); 3553 } 3554 3555 static void setup_frame(int sig, struct target_sigaction *ka, 3556 target_sigset_t *set, CPUSH4State *regs) 3557 { 3558 struct target_sigframe *frame; 3559 abi_ulong frame_addr; 3560 int i; 3561 3562 unwind_gusa(regs); 3563 3564 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3565 trace_user_setup_frame(regs, frame_addr); 3566 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3567 goto give_sigsegv; 3568 } 3569 3570 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3571 3572 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3573 __put_user(set->sig[i + 1], &frame->extramask[i]); 3574 } 3575 3576 /* Set up to return from userspace. If provided, use a stub 3577 already in userspace. */ 3578 if (ka->sa_flags & TARGET_SA_RESTORER) { 3579 regs->pr = (unsigned long) ka->sa_restorer; 3580 } else { 3581 /* Generate return code (system call to sigreturn) */ 3582 abi_ulong retcode_addr = frame_addr + 3583 offsetof(struct target_sigframe, retcode); 3584 __put_user(MOVW(2), &frame->retcode[0]); 3585 __put_user(TRAP_NOARG, &frame->retcode[1]); 3586 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3587 regs->pr = (unsigned long) retcode_addr; 3588 } 3589 3590 /* Set up registers for signal handler */ 3591 regs->gregs[15] = frame_addr; 3592 regs->gregs[4] = sig; /* Arg for signal handler */ 3593 regs->gregs[5] = 0; 3594 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3595 regs->pc = (unsigned long) ka->_sa_handler; 3596 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); 3597 3598 unlock_user_struct(frame, frame_addr, 1); 3599 return; 3600 3601 give_sigsegv: 3602 unlock_user_struct(frame, frame_addr, 1); 3603 force_sigsegv(sig); 3604 } 3605 3606 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3607 target_siginfo_t *info, 3608 target_sigset_t *set, CPUSH4State *regs) 3609 { 3610 struct target_rt_sigframe *frame; 3611 abi_ulong frame_addr; 3612 int i; 3613 3614 unwind_gusa(regs); 3615 3616 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3617 trace_user_setup_rt_frame(regs, frame_addr); 3618 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3619 goto give_sigsegv; 3620 } 3621 3622 tswap_siginfo(&frame->info, info); 3623 3624 /* Create the ucontext. 
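Flags and link are cleared, the current sigaltstack settings are recorded, the machine state is saved with setup_sigcontext(), and the blocked-signal mask is copied in.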
*/ 3625 __put_user(0, &frame->uc.tuc_flags); 3626 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3627 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3628 &frame->uc.tuc_stack.ss_sp); 3629 __put_user(sas_ss_flags(regs->gregs[15]), 3630 &frame->uc.tuc_stack.ss_flags); 3631 __put_user(target_sigaltstack_used.ss_size, 3632 &frame->uc.tuc_stack.ss_size); 3633 setup_sigcontext(&frame->uc.tuc_mcontext, 3634 regs, set->sig[0]); 3635 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3636 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3637 } 3638 3639 /* Set up to return from userspace. If provided, use a stub 3640 already in userspace. */ 3641 if (ka->sa_flags & TARGET_SA_RESTORER) { 3642 regs->pr = (unsigned long) ka->sa_restorer; 3643 } else { 3644 /* Generate return code (system call to sigreturn) */ 3645 abi_ulong retcode_addr = frame_addr + 3646 offsetof(struct target_rt_sigframe, retcode); 3647 __put_user(MOVW(2), &frame->retcode[0]); 3648 __put_user(TRAP_NOARG, &frame->retcode[1]); 3649 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3650 regs->pr = (unsigned long) retcode_addr; 3651 } 3652 3653 /* Set up registers for signal handler */ 3654 regs->gregs[15] = frame_addr; 3655 regs->gregs[4] = sig; /* Arg for signal handler */ 3656 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3657 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3658 regs->pc = (unsigned long) ka->_sa_handler; 3659 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); 3660 3661 unlock_user_struct(frame, frame_addr, 1); 3662 return; 3663 3664 give_sigsegv: 3665 unlock_user_struct(frame, frame_addr, 1); 3666 force_sigsegv(sig); 3667 } 3668 3669 long do_sigreturn(CPUSH4State *regs) 3670 { 3671 struct target_sigframe *frame; 3672 abi_ulong frame_addr; 3673 sigset_t blocked; 3674 target_sigset_t target_set; 3675 int i; 3676 int err = 0; 3677 3678 frame_addr = regs->gregs[15]; 3679 trace_user_do_sigreturn(regs, frame_addr); 3680 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3681 goto badframe; 3682 } 3683 3684 __get_user(target_set.sig[0], &frame->sc.oldmask); 3685 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3686 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3687 } 3688 3689 if (err) 3690 goto badframe; 3691 3692 target_to_host_sigset_internal(&blocked, &target_set); 3693 set_sigmask(&blocked); 3694 3695 restore_sigcontext(regs, &frame->sc); 3696 3697 unlock_user_struct(frame, frame_addr, 0); 3698 return -TARGET_QEMU_ESIGRETURN; 3699 3700 badframe: 3701 unlock_user_struct(frame, frame_addr, 0); 3702 force_sig(TARGET_SIGSEGV); 3703 return -TARGET_QEMU_ESIGRETURN; 3704 } 3705 3706 long do_rt_sigreturn(CPUSH4State *regs) 3707 { 3708 struct target_rt_sigframe *frame; 3709 abi_ulong frame_addr; 3710 sigset_t blocked; 3711 3712 frame_addr = regs->gregs[15]; 3713 trace_user_do_rt_sigreturn(regs, frame_addr); 3714 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3715 goto badframe; 3716 } 3717 3718 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3719 set_sigmask(&blocked); 3720 3721 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3722 3723 if (do_sigaltstack(frame_addr + 3724 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3725 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3726 goto badframe; 3727 } 3728 3729 unlock_user_struct(frame, frame_addr, 0); 3730 return -TARGET_QEMU_ESIGRETURN; 3731 3732 badframe: 3733 unlock_user_struct(frame, frame_addr, 0); 3734 force_sig(TARGET_SIGSEGV); 3735 return -TARGET_QEMU_ESIGRETURN; 3736 } 3737 #elif 
defined(TARGET_MICROBLAZE) 3738 3739 struct target_sigcontext { 3740 struct target_pt_regs regs; /* needs to be first */ 3741 uint32_t oldmask; 3742 }; 3743 3744 struct target_stack_t { 3745 abi_ulong ss_sp; 3746 int ss_flags; 3747 unsigned int ss_size; 3748 }; 3749 3750 struct target_ucontext { 3751 abi_ulong tuc_flags; 3752 abi_ulong tuc_link; 3753 struct target_stack_t tuc_stack; 3754 struct target_sigcontext tuc_mcontext; 3755 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3756 }; 3757 3758 /* Signal frames. */ 3759 struct target_signal_frame { 3760 struct target_ucontext uc; 3761 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3762 uint32_t tramp[2]; 3763 }; 3764 3765 struct rt_signal_frame { 3766 siginfo_t info; 3767 ucontext_t uc; 3768 uint32_t tramp[2]; 3769 }; 3770 3771 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3772 { 3773 __put_user(env->regs[0], &sc->regs.r0); 3774 __put_user(env->regs[1], &sc->regs.r1); 3775 __put_user(env->regs[2], &sc->regs.r2); 3776 __put_user(env->regs[3], &sc->regs.r3); 3777 __put_user(env->regs[4], &sc->regs.r4); 3778 __put_user(env->regs[5], &sc->regs.r5); 3779 __put_user(env->regs[6], &sc->regs.r6); 3780 __put_user(env->regs[7], &sc->regs.r7); 3781 __put_user(env->regs[8], &sc->regs.r8); 3782 __put_user(env->regs[9], &sc->regs.r9); 3783 __put_user(env->regs[10], &sc->regs.r10); 3784 __put_user(env->regs[11], &sc->regs.r11); 3785 __put_user(env->regs[12], &sc->regs.r12); 3786 __put_user(env->regs[13], &sc->regs.r13); 3787 __put_user(env->regs[14], &sc->regs.r14); 3788 __put_user(env->regs[15], &sc->regs.r15); 3789 __put_user(env->regs[16], &sc->regs.r16); 3790 __put_user(env->regs[17], &sc->regs.r17); 3791 __put_user(env->regs[18], &sc->regs.r18); 3792 __put_user(env->regs[19], &sc->regs.r19); 3793 __put_user(env->regs[20], &sc->regs.r20); 3794 __put_user(env->regs[21], &sc->regs.r21); 3795 __put_user(env->regs[22], &sc->regs.r22); 3796 __put_user(env->regs[23], &sc->regs.r23); 3797 __put_user(env->regs[24], &sc->regs.r24); 3798 __put_user(env->regs[25], &sc->regs.r25); 3799 __put_user(env->regs[26], &sc->regs.r26); 3800 __put_user(env->regs[27], &sc->regs.r27); 3801 __put_user(env->regs[28], &sc->regs.r28); 3802 __put_user(env->regs[29], &sc->regs.r29); 3803 __put_user(env->regs[30], &sc->regs.r30); 3804 __put_user(env->regs[31], &sc->regs.r31); 3805 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3806 } 3807 3808 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3809 { 3810 __get_user(env->regs[0], &sc->regs.r0); 3811 __get_user(env->regs[1], &sc->regs.r1); 3812 __get_user(env->regs[2], &sc->regs.r2); 3813 __get_user(env->regs[3], &sc->regs.r3); 3814 __get_user(env->regs[4], &sc->regs.r4); 3815 __get_user(env->regs[5], &sc->regs.r5); 3816 __get_user(env->regs[6], &sc->regs.r6); 3817 __get_user(env->regs[7], &sc->regs.r7); 3818 __get_user(env->regs[8], &sc->regs.r8); 3819 __get_user(env->regs[9], &sc->regs.r9); 3820 __get_user(env->regs[10], &sc->regs.r10); 3821 __get_user(env->regs[11], &sc->regs.r11); 3822 __get_user(env->regs[12], &sc->regs.r12); 3823 __get_user(env->regs[13], &sc->regs.r13); 3824 __get_user(env->regs[14], &sc->regs.r14); 3825 __get_user(env->regs[15], &sc->regs.r15); 3826 __get_user(env->regs[16], &sc->regs.r16); 3827 __get_user(env->regs[17], &sc->regs.r17); 3828 __get_user(env->regs[18], &sc->regs.r18); 3829 __get_user(env->regs[19], &sc->regs.r19); 3830 __get_user(env->regs[20], &sc->regs.r20); 3831 __get_user(env->regs[21], &sc->regs.r21); 3832 __get_user(env->regs[22], 
&sc->regs.r22); 3833 __get_user(env->regs[23], &sc->regs.r23); 3834 __get_user(env->regs[24], &sc->regs.r24); 3835 __get_user(env->regs[25], &sc->regs.r25); 3836 __get_user(env->regs[26], &sc->regs.r26); 3837 __get_user(env->regs[27], &sc->regs.r27); 3838 __get_user(env->regs[28], &sc->regs.r28); 3839 __get_user(env->regs[29], &sc->regs.r29); 3840 __get_user(env->regs[30], &sc->regs.r30); 3841 __get_user(env->regs[31], &sc->regs.r31); 3842 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3843 } 3844 3845 static abi_ulong get_sigframe(struct target_sigaction *ka, 3846 CPUMBState *env, int frame_size) 3847 { 3848 abi_ulong sp = env->regs[1]; 3849 3850 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3851 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3852 } 3853 3854 return ((sp - frame_size) & -8UL); 3855 } 3856 3857 static void setup_frame(int sig, struct target_sigaction *ka, 3858 target_sigset_t *set, CPUMBState *env) 3859 { 3860 struct target_signal_frame *frame; 3861 abi_ulong frame_addr; 3862 int i; 3863 3864 frame_addr = get_sigframe(ka, env, sizeof *frame); 3865 trace_user_setup_frame(env, frame_addr); 3866 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3867 goto badframe; 3868 3869 /* Save the mask. */ 3870 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3871 3872 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3873 __put_user(set->sig[i], &frame->extramask[i - 1]); 3874 } 3875 3876 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3877 3878 /* Set up to return from userspace. If provided, use a stub 3879 already in userspace. */ 3880 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3881 if (ka->sa_flags & TARGET_SA_RESTORER) { 3882 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3883 } else { 3884 uint32_t t; 3885 /* Note, these encodings are _big endian_! */ 3886 /* addi r12, r0, __NR_sigreturn */ 3887 t = 0x31800000UL | TARGET_NR_sigreturn; 3888 __put_user(t, frame->tramp + 0); 3889 /* brki r14, 0x8 */ 3890 t = 0xb9cc0008UL; 3891 __put_user(t, frame->tramp + 1); 3892 3893 /* Return from sighandler will jump to the tramp. 3894 Negative 8 offset because return is rtsd r15, 8 */ 3895 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3896 - 8; 3897 } 3898 3899 /* Set up registers for signal handler */ 3900 env->regs[1] = frame_addr; 3901 /* Signal handler args: */ 3902 env->regs[5] = sig; /* Arg 0: signum */ 3903 env->regs[6] = 0; 3904 /* arg 1: sigcontext */ 3905 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3906 3907 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3908 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3909 3910 unlock_user_struct(frame, frame_addr, 1); 3911 return; 3912 badframe: 3913 force_sigsegv(sig); 3914 } 3915 3916 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3917 target_siginfo_t *info, 3918 target_sigset_t *set, CPUMBState *env) 3919 { 3920 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3921 } 3922 3923 long do_sigreturn(CPUMBState *env) 3924 { 3925 struct target_signal_frame *frame; 3926 abi_ulong frame_addr; 3927 target_sigset_t target_set; 3928 sigset_t set; 3929 int i; 3930 3931 frame_addr = env->regs[R_SP]; 3932 trace_user_do_sigreturn(env, frame_addr); 3933 /* Make sure the guest isn't playing games. 
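       The frame address is whatever the guest left in its stack pointer, so
       lock_user_struct() has to validate it before the saved context is read
       back; a bad address takes the badframe path below and the guest gets
       SIGSEGV instead.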
*/ 3934 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3935 goto badframe; 3936 3937 /* Restore blocked signals */ 3938 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3939 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3940 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3941 } 3942 target_to_host_sigset_internal(&set, &target_set); 3943 set_sigmask(&set); 3944 3945 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3946 /* We got here through a sigreturn syscall, our path back is via an 3947 rtb insn so setup r14 for that. */ 3948 env->regs[14] = env->sregs[SR_PC]; 3949 3950 unlock_user_struct(frame, frame_addr, 0); 3951 return -TARGET_QEMU_ESIGRETURN; 3952 badframe: 3953 force_sig(TARGET_SIGSEGV); 3954 return -TARGET_QEMU_ESIGRETURN; 3955 } 3956 3957 long do_rt_sigreturn(CPUMBState *env) 3958 { 3959 trace_user_do_rt_sigreturn(env, 0); 3960 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3961 return -TARGET_ENOSYS; 3962 } 3963 3964 #elif defined(TARGET_CRIS) 3965 3966 struct target_sigcontext { 3967 struct target_pt_regs regs; /* needs to be first */ 3968 uint32_t oldmask; 3969 uint32_t usp; /* usp before stacking this gunk on it */ 3970 }; 3971 3972 /* Signal frames. */ 3973 struct target_signal_frame { 3974 struct target_sigcontext sc; 3975 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3976 uint16_t retcode[4]; /* Trampoline code. */ 3977 }; 3978 3979 struct rt_signal_frame { 3980 siginfo_t *pinfo; 3981 void *puc; 3982 siginfo_t info; 3983 ucontext_t uc; 3984 uint16_t retcode[4]; /* Trampoline code. */ 3985 }; 3986 3987 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3988 { 3989 __put_user(env->regs[0], &sc->regs.r0); 3990 __put_user(env->regs[1], &sc->regs.r1); 3991 __put_user(env->regs[2], &sc->regs.r2); 3992 __put_user(env->regs[3], &sc->regs.r3); 3993 __put_user(env->regs[4], &sc->regs.r4); 3994 __put_user(env->regs[5], &sc->regs.r5); 3995 __put_user(env->regs[6], &sc->regs.r6); 3996 __put_user(env->regs[7], &sc->regs.r7); 3997 __put_user(env->regs[8], &sc->regs.r8); 3998 __put_user(env->regs[9], &sc->regs.r9); 3999 __put_user(env->regs[10], &sc->regs.r10); 4000 __put_user(env->regs[11], &sc->regs.r11); 4001 __put_user(env->regs[12], &sc->regs.r12); 4002 __put_user(env->regs[13], &sc->regs.r13); 4003 __put_user(env->regs[14], &sc->usp); 4004 __put_user(env->regs[15], &sc->regs.acr); 4005 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 4006 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 4007 __put_user(env->pc, &sc->regs.erp); 4008 } 4009 4010 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 4011 { 4012 __get_user(env->regs[0], &sc->regs.r0); 4013 __get_user(env->regs[1], &sc->regs.r1); 4014 __get_user(env->regs[2], &sc->regs.r2); 4015 __get_user(env->regs[3], &sc->regs.r3); 4016 __get_user(env->regs[4], &sc->regs.r4); 4017 __get_user(env->regs[5], &sc->regs.r5); 4018 __get_user(env->regs[6], &sc->regs.r6); 4019 __get_user(env->regs[7], &sc->regs.r7); 4020 __get_user(env->regs[8], &sc->regs.r8); 4021 __get_user(env->regs[9], &sc->regs.r9); 4022 __get_user(env->regs[10], &sc->regs.r10); 4023 __get_user(env->regs[11], &sc->regs.r11); 4024 __get_user(env->regs[12], &sc->regs.r12); 4025 __get_user(env->regs[13], &sc->regs.r13); 4026 __get_user(env->regs[14], &sc->usp); 4027 __get_user(env->regs[15], &sc->regs.acr); 4028 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 4029 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 4030 __get_user(env->pc, &sc->regs.erp); 4031 } 4032 4033 static 
abi_ulong get_sigframe(CPUCRISState *env, int framesize)
{
    abi_ulong sp;
    /* Align the stack downwards to 4. */
    sp = (env->regs[R_SP] & ~3);
    return sp - framesize;
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto badframe;

    /*
     * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
     * use this trampoline anymore but it sets it up for GDB.
     * In QEMU, using the trampoline simplifies things a bit so we use it.
     *
     * This is movu.w __NR_sigreturn, r9; break 13;
     */
    __put_user(0x9c5f, frame->retcode + 0);
    __put_user(TARGET_NR_sigreturn,
               frame->retcode + 1);
    __put_user(0xe93d, frame->retcode + 2);

    /* Save the mask. */
    __put_user(set->sig[0], &frame->sc.oldmask);

    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_sigcontext(&frame->sc, env);

    /* Move the stack and setup the arguments for the handler. */
    env->regs[R_SP] = frame_addr;
    env->regs[10] = sig;
    env->pc = (unsigned long) ka->_sa_handler;
    /* Link SRP so the guest returns through the trampoline. */
    env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);

    unlock_user_struct(frame, frame_addr, 1);
    return;
badframe:
    force_sigsegv(sig);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUCRISState *env)
{
    fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUCRISState *env)
{
    struct target_signal_frame *frame;
    abi_ulong frame_addr;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    frame_addr = env->regs[R_SP];
    trace_user_do_sigreturn(env, frame_addr);
    /* Make sure the guest isn't playing games.
*/ 4105 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 4106 goto badframe; 4107 } 4108 4109 /* Restore blocked signals */ 4110 __get_user(target_set.sig[0], &frame->sc.oldmask); 4111 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 4112 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 4113 } 4114 target_to_host_sigset_internal(&set, &target_set); 4115 set_sigmask(&set); 4116 4117 restore_sigcontext(&frame->sc, env); 4118 unlock_user_struct(frame, frame_addr, 0); 4119 return -TARGET_QEMU_ESIGRETURN; 4120 badframe: 4121 force_sig(TARGET_SIGSEGV); 4122 return -TARGET_QEMU_ESIGRETURN; 4123 } 4124 4125 long do_rt_sigreturn(CPUCRISState *env) 4126 { 4127 trace_user_do_rt_sigreturn(env, 0); 4128 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 4129 return -TARGET_ENOSYS; 4130 } 4131 4132 #elif defined(TARGET_NIOS2) 4133 4134 #define MCONTEXT_VERSION 2 4135 4136 struct target_sigcontext { 4137 int version; 4138 unsigned long gregs[32]; 4139 }; 4140 4141 struct target_ucontext { 4142 abi_ulong tuc_flags; 4143 abi_ulong tuc_link; 4144 target_stack_t tuc_stack; 4145 struct target_sigcontext tuc_mcontext; 4146 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4147 }; 4148 4149 struct target_rt_sigframe { 4150 struct target_siginfo info; 4151 struct target_ucontext uc; 4152 }; 4153 4154 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka) 4155 { 4156 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) { 4157 #ifdef CONFIG_STACK_GROWSUP 4158 return target_sigaltstack_used.ss_sp; 4159 #else 4160 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4161 #endif 4162 } 4163 return sp; 4164 } 4165 4166 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) 4167 { 4168 unsigned long *gregs = uc->tuc_mcontext.gregs; 4169 4170 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version); 4171 __put_user(env->regs[1], &gregs[0]); 4172 __put_user(env->regs[2], &gregs[1]); 4173 __put_user(env->regs[3], &gregs[2]); 4174 __put_user(env->regs[4], &gregs[3]); 4175 __put_user(env->regs[5], &gregs[4]); 4176 __put_user(env->regs[6], &gregs[5]); 4177 __put_user(env->regs[7], &gregs[6]); 4178 __put_user(env->regs[8], &gregs[7]); 4179 __put_user(env->regs[9], &gregs[8]); 4180 __put_user(env->regs[10], &gregs[9]); 4181 __put_user(env->regs[11], &gregs[10]); 4182 __put_user(env->regs[12], &gregs[11]); 4183 __put_user(env->regs[13], &gregs[12]); 4184 __put_user(env->regs[14], &gregs[13]); 4185 __put_user(env->regs[15], &gregs[14]); 4186 __put_user(env->regs[16], &gregs[15]); 4187 __put_user(env->regs[17], &gregs[16]); 4188 __put_user(env->regs[18], &gregs[17]); 4189 __put_user(env->regs[19], &gregs[18]); 4190 __put_user(env->regs[20], &gregs[19]); 4191 __put_user(env->regs[21], &gregs[20]); 4192 __put_user(env->regs[22], &gregs[21]); 4193 __put_user(env->regs[23], &gregs[22]); 4194 __put_user(env->regs[R_RA], &gregs[23]); 4195 __put_user(env->regs[R_FP], &gregs[24]); 4196 __put_user(env->regs[R_GP], &gregs[25]); 4197 __put_user(env->regs[R_EA], &gregs[27]); 4198 __put_user(env->regs[R_SP], &gregs[28]); 4199 4200 return 0; 4201 } 4202 4203 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc, 4204 int *pr2) 4205 { 4206 int temp; 4207 abi_ulong off, frame_addr = env->regs[R_SP]; 4208 unsigned long *gregs = uc->tuc_mcontext.gregs; 4209 int err; 4210 4211 /* Always make any pending restarted system calls return -EINTR */ 4212 /* current->restart_block.fn = do_no_restart_syscall; */ 4213 4214 
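    /* The guest-visible mcontext starts with a version word.  Anything other
       than MCONTEXT_VERSION means this is not a frame we laid out (or it has
       been corrupted), so fail and let do_rt_sigreturn() raise SIGSEGV. */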
__get_user(temp, &uc->tuc_mcontext.version); 4215 if (temp != MCONTEXT_VERSION) { 4216 return 1; 4217 } 4218 4219 /* restore passed registers */ 4220 __get_user(env->regs[1], &gregs[0]); 4221 __get_user(env->regs[2], &gregs[1]); 4222 __get_user(env->regs[3], &gregs[2]); 4223 __get_user(env->regs[4], &gregs[3]); 4224 __get_user(env->regs[5], &gregs[4]); 4225 __get_user(env->regs[6], &gregs[5]); 4226 __get_user(env->regs[7], &gregs[6]); 4227 __get_user(env->regs[8], &gregs[7]); 4228 __get_user(env->regs[9], &gregs[8]); 4229 __get_user(env->regs[10], &gregs[9]); 4230 __get_user(env->regs[11], &gregs[10]); 4231 __get_user(env->regs[12], &gregs[11]); 4232 __get_user(env->regs[13], &gregs[12]); 4233 __get_user(env->regs[14], &gregs[13]); 4234 __get_user(env->regs[15], &gregs[14]); 4235 __get_user(env->regs[16], &gregs[15]); 4236 __get_user(env->regs[17], &gregs[16]); 4237 __get_user(env->regs[18], &gregs[17]); 4238 __get_user(env->regs[19], &gregs[18]); 4239 __get_user(env->regs[20], &gregs[19]); 4240 __get_user(env->regs[21], &gregs[20]); 4241 __get_user(env->regs[22], &gregs[21]); 4242 __get_user(env->regs[23], &gregs[22]); 4243 /* gregs[23] is handled below */ 4244 /* Verify, should this be settable */ 4245 __get_user(env->regs[R_FP], &gregs[24]); 4246 /* Verify, should this be settable */ 4247 __get_user(env->regs[R_GP], &gregs[25]); 4248 /* Not really necessary no user settable bits */ 4249 __get_user(temp, &gregs[26]); 4250 __get_user(env->regs[R_EA], &gregs[27]); 4251 4252 __get_user(env->regs[R_RA], &gregs[23]); 4253 __get_user(env->regs[R_SP], &gregs[28]); 4254 4255 off = offsetof(struct target_rt_sigframe, uc.tuc_stack); 4256 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env)); 4257 if (err == -EFAULT) { 4258 return 1; 4259 } 4260 4261 *pr2 = env->regs[2]; 4262 return 0; 4263 } 4264 4265 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env, 4266 size_t frame_size) 4267 { 4268 unsigned long usp; 4269 4270 /* Default to using normal stack. */ 4271 usp = env->regs[R_SP]; 4272 4273 /* This is the X/Open sanctioned signal stack switching. */ 4274 usp = sigsp(usp, ka); 4275 4276 /* Verify, is it 32 or 64 bit aligned */ 4277 return (void *)((usp - frame_size) & -8UL); 4278 } 4279 4280 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4281 target_siginfo_t *info, 4282 target_sigset_t *set, 4283 CPUNios2State *env) 4284 { 4285 struct target_rt_sigframe *frame; 4286 int i, err = 0; 4287 4288 frame = get_sigframe(ka, env, sizeof(*frame)); 4289 4290 if (ka->sa_flags & SA_SIGINFO) { 4291 tswap_siginfo(&frame->info, info); 4292 } 4293 4294 /* Create the ucontext. */ 4295 __put_user(0, &frame->uc.tuc_flags); 4296 __put_user(0, &frame->uc.tuc_link); 4297 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4298 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags); 4299 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4300 err |= rt_setup_ucontext(&frame->uc, env); 4301 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4302 __put_user((abi_ulong)set->sig[i], 4303 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4304 } 4305 4306 if (err) { 4307 goto give_sigsegv; 4308 } 4309 4310 /* Set up to return from userspace; jump to fixed address sigreturn 4311 trampoline on kuser page. 
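       0x1044 is the fixed guest address of that trampoline in the nios2 kuser
       page, so ra is pointed straight at it rather than at a stub written
       into the frame.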
*/ 4312 env->regs[R_RA] = (unsigned long) (0x1044); 4313 4314 /* Set up registers for signal handler */ 4315 env->regs[R_SP] = (unsigned long) frame; 4316 env->regs[4] = (unsigned long) sig; 4317 env->regs[5] = (unsigned long) &frame->info; 4318 env->regs[6] = (unsigned long) &frame->uc; 4319 env->regs[R_EA] = (unsigned long) ka->_sa_handler; 4320 return; 4321 4322 give_sigsegv: 4323 if (sig == TARGET_SIGSEGV) { 4324 ka->_sa_handler = TARGET_SIG_DFL; 4325 } 4326 force_sigsegv(sig); 4327 return; 4328 } 4329 4330 long do_sigreturn(CPUNios2State *env) 4331 { 4332 trace_user_do_sigreturn(env, 0); 4333 fprintf(stderr, "do_sigreturn: not implemented\n"); 4334 return -TARGET_ENOSYS; 4335 } 4336 4337 long do_rt_sigreturn(CPUNios2State *env) 4338 { 4339 /* Verify, can we follow the stack back */ 4340 abi_ulong frame_addr = env->regs[R_SP]; 4341 struct target_rt_sigframe *frame; 4342 sigset_t set; 4343 int rval; 4344 4345 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4346 goto badframe; 4347 } 4348 4349 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4350 do_sigprocmask(SIG_SETMASK, &set, NULL); 4351 4352 if (rt_restore_ucontext(env, &frame->uc, &rval)) { 4353 goto badframe; 4354 } 4355 4356 unlock_user_struct(frame, frame_addr, 0); 4357 return rval; 4358 4359 badframe: 4360 unlock_user_struct(frame, frame_addr, 0); 4361 force_sig(TARGET_SIGSEGV); 4362 return 0; 4363 } 4364 /* TARGET_NIOS2 */ 4365 4366 #elif defined(TARGET_OPENRISC) 4367 4368 struct target_sigcontext { 4369 struct target_pt_regs regs; 4370 abi_ulong oldmask; 4371 abi_ulong usp; 4372 }; 4373 4374 struct target_ucontext { 4375 abi_ulong tuc_flags; 4376 abi_ulong tuc_link; 4377 target_stack_t tuc_stack; 4378 struct target_sigcontext tuc_mcontext; 4379 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4380 }; 4381 4382 struct target_rt_sigframe { 4383 abi_ulong pinfo; 4384 uint64_t puc; 4385 struct target_siginfo info; 4386 struct target_sigcontext sc; 4387 struct target_ucontext uc; 4388 unsigned char retcode[16]; /* trampoline code */ 4389 }; 4390 4391 /* This is the asm-generic/ucontext.h version */ 4392 #if 0 4393 static int restore_sigcontext(CPUOpenRISCState *regs, 4394 struct target_sigcontext *sc) 4395 { 4396 unsigned int err = 0; 4397 unsigned long old_usp; 4398 4399 /* Alwys make any pending restarted system call return -EINTR */ 4400 current_thread_info()->restart_block.fn = do_no_restart_syscall; 4401 4402 /* restore the regs from &sc->regs (same as sc, since regs is first) 4403 * (sc is already checked for VERIFY_READ since the sigframe was 4404 * checked in sys_sigreturn previously) 4405 */ 4406 4407 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 4408 goto badframe; 4409 } 4410 4411 /* make sure the U-flag is set so user-mode cannot fool us */ 4412 4413 regs->sr &= ~SR_SM; 4414 4415 /* restore the old USP as it was before we stacked the sc etc. 4416 * (we cannot just pop the sigcontext since we aligned the sp and 4417 * stuff after pushing it) 4418 */ 4419 4420 __get_user(old_usp, &sc->usp); 4421 phx_signal("old_usp 0x%lx", old_usp); 4422 4423 __PHX__ REALLY /* ??? */ 4424 wrusp(old_usp); 4425 regs->gpr[1] = old_usp; 4426 4427 /* TODO: the other ports use regs->orig_XX to disable syscall checks 4428 * after this completes, but we don't use that mechanism. maybe we can 4429 * use it now ? 4430 */ 4431 4432 return err; 4433 4434 badframe: 4435 return 1; 4436 } 4437 #endif 4438 4439 /* Set up a signal frame. 
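       For OpenRISC the rt frame assembled below holds the pinfo/puc pointers,
       the siginfo, a sigcontext recording the old mask and user stack pointer,
       the ucontext, and a 16-byte return trampoline (see struct
       target_rt_sigframe above).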
*/ 4440 4441 static void setup_sigcontext(struct target_sigcontext *sc, 4442 CPUOpenRISCState *regs, 4443 unsigned long mask) 4444 { 4445 unsigned long usp = cpu_get_gpr(regs, 1); 4446 4447 /* copy the regs. they are first in sc so we can use sc directly */ 4448 4449 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 4450 4451 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 4452 the signal handler. The frametype will be restored to its previous 4453 value in restore_sigcontext. */ 4454 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 4455 4456 /* then some other stuff */ 4457 __put_user(mask, &sc->oldmask); 4458 __put_user(usp, &sc->usp); 4459 } 4460 4461 static inline unsigned long align_sigframe(unsigned long sp) 4462 { 4463 return sp & ~3UL; 4464 } 4465 4466 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 4467 CPUOpenRISCState *regs, 4468 size_t frame_size) 4469 { 4470 unsigned long sp = cpu_get_gpr(regs, 1); 4471 int onsigstack = on_sig_stack(sp); 4472 4473 /* redzone */ 4474 /* This is the X/Open sanctioned signal stack switching. */ 4475 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4476 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4477 } 4478 4479 sp = align_sigframe(sp - frame_size); 4480 4481 /* 4482 * If we are on the alternate signal stack and would overflow it, don't. 4483 * Return an always-bogus address instead so we will die with SIGSEGV. 4484 */ 4485 4486 if (onsigstack && !likely(on_sig_stack(sp))) { 4487 return -1L; 4488 } 4489 4490 return sp; 4491 } 4492 4493 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4494 target_siginfo_t *info, 4495 target_sigset_t *set, CPUOpenRISCState *env) 4496 { 4497 int err = 0; 4498 abi_ulong frame_addr; 4499 unsigned long return_ip; 4500 struct target_rt_sigframe *frame; 4501 abi_ulong info_addr, uc_addr; 4502 4503 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4504 trace_user_setup_rt_frame(env, frame_addr); 4505 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4506 goto give_sigsegv; 4507 } 4508 4509 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4510 __put_user(info_addr, &frame->pinfo); 4511 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4512 __put_user(uc_addr, &frame->puc); 4513 4514 if (ka->sa_flags & SA_SIGINFO) { 4515 tswap_siginfo(&frame->info, info); 4516 } 4517 4518 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/ 4519 __put_user(0, &frame->uc.tuc_flags); 4520 __put_user(0, &frame->uc.tuc_link); 4521 __put_user(target_sigaltstack_used.ss_sp, 4522 &frame->uc.tuc_stack.ss_sp); 4523 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)), 4524 &frame->uc.tuc_stack.ss_flags); 4525 __put_user(target_sigaltstack_used.ss_size, 4526 &frame->uc.tuc_stack.ss_size); 4527 setup_sigcontext(&frame->sc, env, set->sig[0]); 4528 4529 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4530 4531 /* trampoline - the desired return ip is the retcode itself */ 4532 return_ip = (unsigned long)&frame->retcode; 4533 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4534 __put_user(0xa960, (short *)(frame->retcode + 0)); 4535 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4536 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4537 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4538 4539 if (err) { 4540 goto give_sigsegv; 4541 } 4542 4543 /* TODO what is the current->exec_domain stuff and invmap ? 
*/ 4544 4545 /* Set up registers for signal handler */ 4546 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4547 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */ 4548 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */ 4549 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */ 4550 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */ 4551 4552 /* actually move the usp to reflect the stacked frame */ 4553 cpu_set_gpr(env, 1, (unsigned long)frame); 4554 4555 return; 4556 4557 give_sigsegv: 4558 unlock_user_struct(frame, frame_addr, 1); 4559 force_sigsegv(sig); 4560 } 4561 4562 long do_sigreturn(CPUOpenRISCState *env) 4563 { 4564 trace_user_do_sigreturn(env, 0); 4565 fprintf(stderr, "do_sigreturn: not implemented\n"); 4566 return -TARGET_ENOSYS; 4567 } 4568 4569 long do_rt_sigreturn(CPUOpenRISCState *env) 4570 { 4571 trace_user_do_rt_sigreturn(env, 0); 4572 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4573 return -TARGET_ENOSYS; 4574 } 4575 /* TARGET_OPENRISC */ 4576 4577 #elif defined(TARGET_S390X) 4578 4579 #define __NUM_GPRS 16 4580 #define __NUM_FPRS 16 4581 #define __NUM_ACRS 16 4582 4583 #define S390_SYSCALL_SIZE 2 4584 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4585 4586 #define _SIGCONTEXT_NSIG 64 4587 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4588 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4589 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4590 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4591 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4592 4593 typedef struct { 4594 target_psw_t psw; 4595 target_ulong gprs[__NUM_GPRS]; 4596 unsigned int acrs[__NUM_ACRS]; 4597 } target_s390_regs_common; 4598 4599 typedef struct { 4600 unsigned int fpc; 4601 double fprs[__NUM_FPRS]; 4602 } target_s390_fp_regs; 4603 4604 typedef struct { 4605 target_s390_regs_common regs; 4606 target_s390_fp_regs fpregs; 4607 } target_sigregs; 4608 4609 struct target_sigcontext { 4610 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4611 target_sigregs *sregs; 4612 }; 4613 4614 typedef struct { 4615 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4616 struct target_sigcontext sc; 4617 target_sigregs sregs; 4618 int signo; 4619 uint8_t retcode[S390_SYSCALL_SIZE]; 4620 } sigframe; 4621 4622 struct target_ucontext { 4623 target_ulong tuc_flags; 4624 struct target_ucontext *tuc_link; 4625 target_stack_t tuc_stack; 4626 target_sigregs tuc_mcontext; 4627 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4628 }; 4629 4630 typedef struct { 4631 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4632 uint8_t retcode[S390_SYSCALL_SIZE]; 4633 struct target_siginfo info; 4634 struct target_ucontext uc; 4635 } rt_sigframe; 4636 4637 static inline abi_ulong 4638 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4639 { 4640 abi_ulong sp; 4641 4642 /* Default to using normal stack */ 4643 sp = env->regs[15]; 4644 4645 /* This is the X/Open sanctioned signal stack switching. */ 4646 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4647 if (!sas_ss_flags(sp)) { 4648 sp = target_sigaltstack_used.ss_sp + 4649 target_sigaltstack_used.ss_size; 4650 } 4651 } 4652 4653 /* This is the legacy signal stack switching. 
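       The kernel only takes it for handlers registered without SA_RESTORER
       whose sa_restorer field nevertheless points at a stack; with the
       user_mode() test stubbed out to 0 below, the branch never fires in
       user-mode emulation.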
*/ 4654 else if (/* FIXME !user_mode(regs) */ 0 && 4655 !(ka->sa_flags & TARGET_SA_RESTORER) && 4656 ka->sa_restorer) { 4657 sp = (abi_ulong) ka->sa_restorer; 4658 } 4659 4660 return (sp - frame_size) & -8ul; 4661 } 4662 4663 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4664 { 4665 int i; 4666 //save_access_regs(current->thread.acrs); FIXME 4667 4668 /* Copy a 'clean' PSW mask to the user to avoid leaking 4669 information about whether PER is currently on. */ 4670 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4671 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4672 for (i = 0; i < 16; i++) { 4673 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4674 } 4675 for (i = 0; i < 16; i++) { 4676 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4677 } 4678 /* 4679 * We have to store the fp registers to current->thread.fp_regs 4680 * to merge them with the emulated registers. 4681 */ 4682 //save_fp_regs(¤t->thread.fp_regs); FIXME 4683 for (i = 0; i < 16; i++) { 4684 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4685 } 4686 } 4687 4688 static void setup_frame(int sig, struct target_sigaction *ka, 4689 target_sigset_t *set, CPUS390XState *env) 4690 { 4691 sigframe *frame; 4692 abi_ulong frame_addr; 4693 4694 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4695 trace_user_setup_frame(env, frame_addr); 4696 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4697 goto give_sigsegv; 4698 } 4699 4700 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4701 4702 save_sigregs(env, &frame->sregs); 4703 4704 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4705 (abi_ulong *)&frame->sc.sregs); 4706 4707 /* Set up to return from userspace. If provided, use a stub 4708 already in userspace. */ 4709 if (ka->sa_flags & TARGET_SA_RESTORER) { 4710 env->regs[14] = (unsigned long) 4711 ka->sa_restorer | PSW_ADDR_AMODE; 4712 } else { 4713 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4714 | PSW_ADDR_AMODE; 4715 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4716 (uint16_t *)(frame->retcode)); 4717 } 4718 4719 /* Set up backchain. */ 4720 __put_user(env->regs[15], (abi_ulong *) frame); 4721 4722 /* Set up registers for signal handler */ 4723 env->regs[15] = frame_addr; 4724 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4725 4726 env->regs[2] = sig; //map_signal(sig); 4727 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4728 4729 /* We forgot to include these in the sigcontext. 4730 To avoid breaking binary compatibility, they are passed as args. */ 4731 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4732 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4733 4734 /* Place signal number on stack to allow backtrace from handler. */ 4735 __put_user(env->regs[2], &frame->signo); 4736 unlock_user_struct(frame, frame_addr, 1); 4737 return; 4738 4739 give_sigsegv: 4740 force_sigsegv(sig); 4741 } 4742 4743 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4744 target_siginfo_t *info, 4745 target_sigset_t *set, CPUS390XState *env) 4746 { 4747 int i; 4748 rt_sigframe *frame; 4749 abi_ulong frame_addr; 4750 4751 frame_addr = get_sigframe(ka, env, sizeof *frame); 4752 trace_user_setup_rt_frame(env, frame_addr); 4753 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4754 goto give_sigsegv; 4755 } 4756 4757 tswap_siginfo(&frame->info, info); 4758 4759 /* Create the ucontext. 
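       uc_flags and uc_link are cleared, the current sigaltstack settings and
       the machine state (via save_sigregs()) are recorded, and the full
       target signal mask is copied in word by word.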
*/ 4760 __put_user(0, &frame->uc.tuc_flags); 4761 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4762 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4763 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4764 &frame->uc.tuc_stack.ss_flags); 4765 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4766 save_sigregs(env, &frame->uc.tuc_mcontext); 4767 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4768 __put_user((abi_ulong)set->sig[i], 4769 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4770 } 4771 4772 /* Set up to return from userspace. If provided, use a stub 4773 already in userspace. */ 4774 if (ka->sa_flags & TARGET_SA_RESTORER) { 4775 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4776 } else { 4777 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4778 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4779 (uint16_t *)(frame->retcode)); 4780 } 4781 4782 /* Set up backchain. */ 4783 __put_user(env->regs[15], (abi_ulong *) frame); 4784 4785 /* Set up registers for signal handler */ 4786 env->regs[15] = frame_addr; 4787 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4788 4789 env->regs[2] = sig; //map_signal(sig); 4790 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4791 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4792 return; 4793 4794 give_sigsegv: 4795 force_sigsegv(sig); 4796 } 4797 4798 static int 4799 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4800 { 4801 int err = 0; 4802 int i; 4803 4804 for (i = 0; i < 16; i++) { 4805 __get_user(env->regs[i], &sc->regs.gprs[i]); 4806 } 4807 4808 __get_user(env->psw.mask, &sc->regs.psw.mask); 4809 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4810 (unsigned long long)env->psw.addr); 4811 __get_user(env->psw.addr, &sc->regs.psw.addr); 4812 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4813 4814 for (i = 0; i < 16; i++) { 4815 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4816 } 4817 for (i = 0; i < 16; i++) { 4818 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4819 } 4820 4821 return err; 4822 } 4823 4824 long do_sigreturn(CPUS390XState *env) 4825 { 4826 sigframe *frame; 4827 abi_ulong frame_addr = env->regs[15]; 4828 target_sigset_t target_set; 4829 sigset_t set; 4830 4831 trace_user_do_sigreturn(env, frame_addr); 4832 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4833 goto badframe; 4834 } 4835 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4836 4837 target_to_host_sigset_internal(&set, &target_set); 4838 set_sigmask(&set); /* ~_BLOCKABLE? */ 4839 4840 if (restore_sigregs(env, &frame->sregs)) { 4841 goto badframe; 4842 } 4843 4844 unlock_user_struct(frame, frame_addr, 0); 4845 return -TARGET_QEMU_ESIGRETURN; 4846 4847 badframe: 4848 force_sig(TARGET_SIGSEGV); 4849 return -TARGET_QEMU_ESIGRETURN; 4850 } 4851 4852 long do_rt_sigreturn(CPUS390XState *env) 4853 { 4854 rt_sigframe *frame; 4855 abi_ulong frame_addr = env->regs[15]; 4856 sigset_t set; 4857 4858 trace_user_do_rt_sigreturn(env, frame_addr); 4859 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4860 goto badframe; 4861 } 4862 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4863 4864 set_sigmask(&set); /* ~_BLOCKABLE? 
*/ 4865 4866 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4867 goto badframe; 4868 } 4869 4870 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4871 get_sp_from_cpustate(env)) == -EFAULT) { 4872 goto badframe; 4873 } 4874 unlock_user_struct(frame, frame_addr, 0); 4875 return -TARGET_QEMU_ESIGRETURN; 4876 4877 badframe: 4878 unlock_user_struct(frame, frame_addr, 0); 4879 force_sig(TARGET_SIGSEGV); 4880 return -TARGET_QEMU_ESIGRETURN; 4881 } 4882 4883 #elif defined(TARGET_PPC) 4884 4885 /* Size of dummy stack frame allocated when calling signal handler. 4886 See arch/powerpc/include/asm/ptrace.h. */ 4887 #if defined(TARGET_PPC64) 4888 #define SIGNAL_FRAMESIZE 128 4889 #else 4890 #define SIGNAL_FRAMESIZE 64 4891 #endif 4892 4893 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4894 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4895 struct target_mcontext { 4896 target_ulong mc_gregs[48]; 4897 /* Includes fpscr. */ 4898 uint64_t mc_fregs[33]; 4899 #if defined(TARGET_PPC64) 4900 /* Pointer to the vector regs */ 4901 target_ulong v_regs; 4902 #else 4903 target_ulong mc_pad[2]; 4904 #endif 4905 /* We need to handle Altivec and SPE at the same time, which no 4906 kernel needs to do. Fortunately, the kernel defines this bit to 4907 be Altivec-register-large all the time, rather than trying to 4908 twiddle it based on the specific platform. */ 4909 union { 4910 /* SPE vector registers. One extra for SPEFSCR. */ 4911 uint32_t spe[33]; 4912 /* Altivec vector registers. The packing of VSCR and VRSAVE 4913 varies depending on whether we're PPC64 or not: PPC64 splits 4914 them apart; PPC32 stuffs them together. 4915 We also need to account for the VSX registers on PPC64 4916 */ 4917 #if defined(TARGET_PPC64) 4918 #define QEMU_NVRREG (34 + 16) 4919 /* On ppc64, this mcontext structure is naturally *unaligned*, 4920 * or rather it is aligned on a 8 bytes boundary but not on 4921 * a 16 bytes one. This pad fixes it up. This is also why the 4922 * vector regs are referenced by the v_regs pointer above so 4923 * any amount of padding can be added here 4924 */ 4925 target_ulong pad; 4926 #else 4927 /* On ppc32, we are already aligned to 16 bytes */ 4928 #define QEMU_NVRREG 33 4929 #endif 4930 /* We cannot use ppc_avr_t here as we do *not* want the implied 4931 * 16-bytes alignment that would result from it. This would have 4932 * the effect of making the whole struct target_mcontext aligned 4933 * which breaks the layout of struct target_ucontext on ppc64. 4934 */ 4935 uint64_t altivec[QEMU_NVRREG][2]; 4936 #undef QEMU_NVRREG 4937 } mc_vregs; 4938 }; 4939 4940 /* See arch/powerpc/include/asm/sigcontext.h. */ 4941 struct target_sigcontext { 4942 target_ulong _unused[4]; 4943 int32_t signal; 4944 #if defined(TARGET_PPC64) 4945 int32_t pad0; 4946 #endif 4947 target_ulong handler; 4948 target_ulong oldmask; 4949 target_ulong regs; /* struct pt_regs __user * */ 4950 #if defined(TARGET_PPC64) 4951 struct target_mcontext mcontext; 4952 #endif 4953 }; 4954 4955 /* Indices for target_mcontext.mc_gregs, below. 4956 See arch/powerpc/include/asm/ptrace.h for details. 
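       Slots 0-31 mirror the GPRs; the remaining entries carry NIP, MSR,
       ORIG_R3, CTR, LNK, XER, CCR and the trap/DAR/DSISR/RESULT bookkeeping
       words, 44 entries in all (MQ and SOFTE share index 39).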
*/ 4957 enum { 4958 TARGET_PT_R0 = 0, 4959 TARGET_PT_R1 = 1, 4960 TARGET_PT_R2 = 2, 4961 TARGET_PT_R3 = 3, 4962 TARGET_PT_R4 = 4, 4963 TARGET_PT_R5 = 5, 4964 TARGET_PT_R6 = 6, 4965 TARGET_PT_R7 = 7, 4966 TARGET_PT_R8 = 8, 4967 TARGET_PT_R9 = 9, 4968 TARGET_PT_R10 = 10, 4969 TARGET_PT_R11 = 11, 4970 TARGET_PT_R12 = 12, 4971 TARGET_PT_R13 = 13, 4972 TARGET_PT_R14 = 14, 4973 TARGET_PT_R15 = 15, 4974 TARGET_PT_R16 = 16, 4975 TARGET_PT_R17 = 17, 4976 TARGET_PT_R18 = 18, 4977 TARGET_PT_R19 = 19, 4978 TARGET_PT_R20 = 20, 4979 TARGET_PT_R21 = 21, 4980 TARGET_PT_R22 = 22, 4981 TARGET_PT_R23 = 23, 4982 TARGET_PT_R24 = 24, 4983 TARGET_PT_R25 = 25, 4984 TARGET_PT_R26 = 26, 4985 TARGET_PT_R27 = 27, 4986 TARGET_PT_R28 = 28, 4987 TARGET_PT_R29 = 29, 4988 TARGET_PT_R30 = 30, 4989 TARGET_PT_R31 = 31, 4990 TARGET_PT_NIP = 32, 4991 TARGET_PT_MSR = 33, 4992 TARGET_PT_ORIG_R3 = 34, 4993 TARGET_PT_CTR = 35, 4994 TARGET_PT_LNK = 36, 4995 TARGET_PT_XER = 37, 4996 TARGET_PT_CCR = 38, 4997 /* Yes, there are two registers with #39. One is 64-bit only. */ 4998 TARGET_PT_MQ = 39, 4999 TARGET_PT_SOFTE = 39, 5000 TARGET_PT_TRAP = 40, 5001 TARGET_PT_DAR = 41, 5002 TARGET_PT_DSISR = 42, 5003 TARGET_PT_RESULT = 43, 5004 TARGET_PT_REGS_COUNT = 44 5005 }; 5006 5007 5008 struct target_ucontext { 5009 target_ulong tuc_flags; 5010 target_ulong tuc_link; /* ucontext_t __user * */ 5011 struct target_sigaltstack tuc_stack; 5012 #if !defined(TARGET_PPC64) 5013 int32_t tuc_pad[7]; 5014 target_ulong tuc_regs; /* struct mcontext __user * 5015 points to uc_mcontext field */ 5016 #endif 5017 target_sigset_t tuc_sigmask; 5018 #if defined(TARGET_PPC64) 5019 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 5020 struct target_sigcontext tuc_sigcontext; 5021 #else 5022 int32_t tuc_maskext[30]; 5023 int32_t tuc_pad2[3]; 5024 struct target_mcontext tuc_mcontext; 5025 #endif 5026 }; 5027 5028 /* See arch/powerpc/kernel/signal_32.c. */ 5029 struct target_sigframe { 5030 struct target_sigcontext sctx; 5031 struct target_mcontext mctx; 5032 int32_t abigap[56]; 5033 }; 5034 5035 #if defined(TARGET_PPC64) 5036 5037 #define TARGET_TRAMP_SIZE 6 5038 5039 struct target_rt_sigframe { 5040 /* sys_rt_sigreturn requires the ucontext be the first field */ 5041 struct target_ucontext uc; 5042 target_ulong _unused[2]; 5043 uint32_t trampoline[TARGET_TRAMP_SIZE]; 5044 target_ulong pinfo; /* struct siginfo __user * */ 5045 target_ulong puc; /* void __user * */ 5046 struct target_siginfo info; 5047 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 5048 char abigap[288]; 5049 } __attribute__((aligned(16))); 5050 5051 #else 5052 5053 struct target_rt_sigframe { 5054 struct target_siginfo info; 5055 struct target_ucontext uc; 5056 int32_t abigap[56]; 5057 }; 5058 5059 #endif 5060 5061 #if defined(TARGET_PPC64) 5062 5063 struct target_func_ptr { 5064 target_ulong entry; 5065 target_ulong toc; 5066 }; 5067 5068 #endif 5069 5070 /* We use the mc_pad field for the signal return trampoline. */ 5071 #define tramp mc_pad 5072 5073 /* See arch/powerpc/kernel/signal.c. 
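       get_sigframe() decides where the frame goes: on the alternate signal
       stack when SA_ONSTACK is requested, an altstack is installed and we are
       not already running on it, otherwise just below the current r1, rounded
       down to a 16-byte boundary.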
*/ 5074 static target_ulong get_sigframe(struct target_sigaction *ka, 5075 CPUPPCState *env, 5076 int frame_size) 5077 { 5078 target_ulong oldsp; 5079 5080 oldsp = env->gpr[1]; 5081 5082 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 5083 (sas_ss_flags(oldsp) == 0)) { 5084 oldsp = (target_sigaltstack_used.ss_sp 5085 + target_sigaltstack_used.ss_size); 5086 } 5087 5088 return (oldsp - frame_size) & ~0xFUL; 5089 } 5090 5091 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \ 5092 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN))) 5093 #define PPC_VEC_HI 0 5094 #define PPC_VEC_LO 1 5095 #else 5096 #define PPC_VEC_HI 1 5097 #define PPC_VEC_LO 0 5098 #endif 5099 5100 5101 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 5102 { 5103 target_ulong msr = env->msr; 5104 int i; 5105 target_ulong ccr = 0; 5106 5107 /* In general, the kernel attempts to be intelligent about what it 5108 needs to save for Altivec/FP/SPE registers. We don't care that 5109 much, so we just go ahead and save everything. */ 5110 5111 /* Save general registers. */ 5112 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5113 __put_user(env->gpr[i], &frame->mc_gregs[i]); 5114 } 5115 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 5116 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 5117 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 5118 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 5119 5120 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 5121 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 5122 } 5123 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 5124 5125 /* Save Altivec registers if necessary. */ 5126 if (env->insns_flags & PPC_ALTIVEC) { 5127 uint32_t *vrsave; 5128 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 5129 ppc_avr_t *avr = &env->avr[i]; 5130 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i]; 5131 5132 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]); 5133 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]); 5134 } 5135 /* Set MSR_VR in the saved MSR value to indicate that 5136 frame->mc_vregs contains valid data. */ 5137 msr |= MSR_VR; 5138 #if defined(TARGET_PPC64) 5139 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33]; 5140 /* 64-bit needs to put a pointer to the vectors in the frame */ 5141 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs); 5142 #else 5143 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32]; 5144 #endif 5145 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave); 5146 } 5147 5148 /* Save VSX second halves */ 5149 if (env->insns_flags2 & PPC2_VSX) { 5150 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34]; 5151 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) { 5152 __put_user(env->vsr[i], &vsregs[i]); 5153 } 5154 } 5155 5156 /* Save floating point registers. */ 5157 if (env->insns_flags & PPC_FLOAT) { 5158 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 5159 __put_user(env->fpr[i], &frame->mc_fregs[i]); 5160 } 5161 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 5162 } 5163 5164 /* Save SPE registers. The kernel only saves the high half. */ 5165 if (env->insns_flags & PPC_SPE) { 5166 #if defined(TARGET_PPC64) 5167 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5168 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 5169 } 5170 #else 5171 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 5172 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 5173 } 5174 #endif 5175 /* Set MSR_SPE in the saved MSR value to indicate that 5176 frame->mc_vregs contains valid data. 
*/ 5177 msr |= MSR_SPE; 5178 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 5179 } 5180 5181 /* Store MSR. */ 5182 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 5183 } 5184 5185 static void encode_trampoline(int sigret, uint32_t *tramp) 5186 { 5187 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 5188 if (sigret) { 5189 __put_user(0x38000000 | sigret, &tramp[0]); 5190 __put_user(0x44000002, &tramp[1]); 5191 } 5192 } 5193 5194 static void restore_user_regs(CPUPPCState *env, 5195 struct target_mcontext *frame, int sig) 5196 { 5197 target_ulong save_r2 = 0; 5198 target_ulong msr; 5199 target_ulong ccr; 5200 5201 int i; 5202 5203 if (!sig) { 5204 save_r2 = env->gpr[2]; 5205 } 5206 5207 /* Restore general registers. */ 5208 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5209 __get_user(env->gpr[i], &frame->mc_gregs[i]); 5210 } 5211 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 5212 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 5213 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 5214 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 5215 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 5216 5217 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 5218 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 5219 } 5220 5221 if (!sig) { 5222 env->gpr[2] = save_r2; 5223 } 5224 /* Restore MSR. */ 5225 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 5226 5227 /* If doing signal return, restore the previous little-endian mode. */ 5228 if (sig) 5229 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 5230 5231 /* Restore Altivec registers if necessary. */ 5232 if (env->insns_flags & PPC_ALTIVEC) { 5233 ppc_avr_t *v_regs; 5234 uint32_t *vrsave; 5235 #if defined(TARGET_PPC64) 5236 uint64_t v_addr; 5237 /* 64-bit needs to recover the pointer to the vectors from the frame */ 5238 __get_user(v_addr, &frame->v_regs); 5239 v_regs = g2h(v_addr); 5240 #else 5241 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec; 5242 #endif 5243 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 5244 ppc_avr_t *avr = &env->avr[i]; 5245 ppc_avr_t *vreg = &v_regs[i]; 5246 5247 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]); 5248 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]); 5249 } 5250 /* Set MSR_VEC in the saved MSR value to indicate that 5251 frame->mc_vregs contains valid data. */ 5252 #if defined(TARGET_PPC64) 5253 vrsave = (uint32_t *)&v_regs[33]; 5254 #else 5255 vrsave = (uint32_t *)&v_regs[32]; 5256 #endif 5257 __get_user(env->spr[SPR_VRSAVE], vrsave); 5258 } 5259 5260 /* Restore VSX second halves */ 5261 if (env->insns_flags2 & PPC2_VSX) { 5262 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34]; 5263 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) { 5264 __get_user(env->vsr[i], &vsregs[i]); 5265 } 5266 } 5267 5268 /* Restore floating point registers. */ 5269 if (env->insns_flags & PPC_FLOAT) { 5270 uint64_t fpscr; 5271 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 5272 __get_user(env->fpr[i], &frame->mc_fregs[i]); 5273 } 5274 __get_user(fpscr, &frame->mc_fregs[32]); 5275 env->fpscr = (uint32_t) fpscr; 5276 } 5277 5278 /* Save SPE registers. The kernel only saves the high half. 
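       (Despite the wording this block restores them: the saved high halves
       are merged back into the upper 32 bits of the 64-bit GPRs on ppc64, or
       into gprh on 32-bit targets, together with spe_fscr.)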
     */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}

#if !defined(TARGET_PPC64)
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs. */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here. We don't
       emulate a vdso, so use a sigreturn system call. */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions. */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler. */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler. */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.
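       so MSR_LE is cleared below before control passes to the handler; the
       previous endian setting is brought back by restore_user_regs() when the
       guest executes sigreturn.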
*/ 5345 env->msr &= ~(1ull << MSR_LE); 5346 5347 unlock_user_struct(frame, frame_addr, 1); 5348 return; 5349 5350 sigsegv: 5351 unlock_user_struct(frame, frame_addr, 1); 5352 force_sigsegv(sig); 5353 } 5354 #endif /* !defined(TARGET_PPC64) */ 5355 5356 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5357 target_siginfo_t *info, 5358 target_sigset_t *set, CPUPPCState *env) 5359 { 5360 struct target_rt_sigframe *rt_sf; 5361 uint32_t *trampptr = 0; 5362 struct target_mcontext *mctx = 0; 5363 target_ulong rt_sf_addr, newsp = 0; 5364 int i, err = 0; 5365 #if defined(TARGET_PPC64) 5366 struct target_sigcontext *sc = 0; 5367 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 5368 #endif 5369 5370 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 5371 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 5372 goto sigsegv; 5373 5374 tswap_siginfo(&rt_sf->info, info); 5375 5376 __put_user(0, &rt_sf->uc.tuc_flags); 5377 __put_user(0, &rt_sf->uc.tuc_link); 5378 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 5379 &rt_sf->uc.tuc_stack.ss_sp); 5380 __put_user(sas_ss_flags(env->gpr[1]), 5381 &rt_sf->uc.tuc_stack.ss_flags); 5382 __put_user(target_sigaltstack_used.ss_size, 5383 &rt_sf->uc.tuc_stack.ss_size); 5384 #if !defined(TARGET_PPC64) 5385 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 5386 &rt_sf->uc.tuc_regs); 5387 #endif 5388 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5389 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 5390 } 5391 5392 #if defined(TARGET_PPC64) 5393 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 5394 trampptr = &rt_sf->trampoline[0]; 5395 5396 sc = &rt_sf->uc.tuc_sigcontext; 5397 __put_user(h2g(mctx), &sc->regs); 5398 __put_user(sig, &sc->signal); 5399 #else 5400 mctx = &rt_sf->uc.tuc_mcontext; 5401 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 5402 #endif 5403 5404 save_user_regs(env, mctx); 5405 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 5406 5407 /* The kernel checks for the presence of a VDSO here. We don't 5408 emulate a vdso, so use a sigreturn system call. */ 5409 env->lr = (target_ulong) h2g(trampptr); 5410 5411 /* Turn off all fp exceptions. */ 5412 env->fpscr = 0; 5413 5414 /* Create a stack frame for the caller of the handler. */ 5415 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 5416 err |= put_user(env->gpr[1], newsp, target_ulong); 5417 5418 if (err) 5419 goto sigsegv; 5420 5421 /* Set up registers for signal handler. */ 5422 env->gpr[1] = newsp; 5423 env->gpr[3] = (target_ulong) sig; 5424 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 5425 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 5426 env->gpr[6] = (target_ulong) h2g(rt_sf); 5427 5428 #if defined(TARGET_PPC64) 5429 if (get_ppc64_abi(image) < 2) { 5430 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 5431 struct target_func_ptr *handler = 5432 (struct target_func_ptr *)g2h(ka->_sa_handler); 5433 env->nip = tswapl(handler->entry); 5434 env->gpr[2] = tswapl(handler->toc); 5435 } else { 5436 /* ELFv2 PPC64 function pointers are entry points, but R12 5437 * must also be set */ 5438 env->nip = tswapl((target_ulong) ka->_sa_handler); 5439 env->gpr[12] = env->nip; 5440 } 5441 #else 5442 env->nip = (target_ulong) ka->_sa_handler; 5443 #endif 5444 5445 /* Signal handlers are entered in big-endian mode. 
*/ 5446 env->msr &= ~(1ull << MSR_LE); 5447 5448 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5449 return; 5450 5451 sigsegv: 5452 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5453 force_sigsegv(sig); 5454 5455 } 5456 5457 #if !defined(TARGET_PPC64) 5458 long do_sigreturn(CPUPPCState *env) 5459 { 5460 struct target_sigcontext *sc = NULL; 5461 struct target_mcontext *sr = NULL; 5462 target_ulong sr_addr = 0, sc_addr; 5463 sigset_t blocked; 5464 target_sigset_t set; 5465 5466 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 5467 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 5468 goto sigsegv; 5469 5470 #if defined(TARGET_PPC64) 5471 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 5472 #else 5473 __get_user(set.sig[0], &sc->oldmask); 5474 __get_user(set.sig[1], &sc->_unused[3]); 5475 #endif 5476 target_to_host_sigset_internal(&blocked, &set); 5477 set_sigmask(&blocked); 5478 5479 __get_user(sr_addr, &sc->regs); 5480 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 5481 goto sigsegv; 5482 restore_user_regs(env, sr, 1); 5483 5484 unlock_user_struct(sr, sr_addr, 1); 5485 unlock_user_struct(sc, sc_addr, 1); 5486 return -TARGET_QEMU_ESIGRETURN; 5487 5488 sigsegv: 5489 unlock_user_struct(sr, sr_addr, 1); 5490 unlock_user_struct(sc, sc_addr, 1); 5491 force_sig(TARGET_SIGSEGV); 5492 return -TARGET_QEMU_ESIGRETURN; 5493 } 5494 #endif /* !defined(TARGET_PPC64) */ 5495 5496 /* See arch/powerpc/kernel/signal_32.c. */ 5497 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 5498 { 5499 struct target_mcontext *mcp; 5500 target_ulong mcp_addr; 5501 sigset_t blocked; 5502 target_sigset_t set; 5503 5504 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 5505 sizeof (set))) 5506 return 1; 5507 5508 #if defined(TARGET_PPC64) 5509 mcp_addr = h2g(ucp) + 5510 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 5511 #else 5512 __get_user(mcp_addr, &ucp->tuc_regs); 5513 #endif 5514 5515 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 5516 return 1; 5517 5518 target_to_host_sigset_internal(&blocked, &set); 5519 set_sigmask(&blocked); 5520 restore_user_regs(env, mcp, sig); 5521 5522 unlock_user_struct(mcp, mcp_addr, 1); 5523 return 0; 5524 } 5525 5526 long do_rt_sigreturn(CPUPPCState *env) 5527 { 5528 struct target_rt_sigframe *rt_sf = NULL; 5529 target_ulong rt_sf_addr; 5530 5531 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5532 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5533 goto sigsegv; 5534 5535 if (do_setcontext(&rt_sf->uc, env, 1)) 5536 goto sigsegv; 5537 5538 do_sigaltstack(rt_sf_addr 5539 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5540 0, env->gpr[1]); 5541 5542 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5543 return -TARGET_QEMU_ESIGRETURN; 5544 5545 sigsegv: 5546 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5547 force_sig(TARGET_SIGSEGV); 5548 return -TARGET_QEMU_ESIGRETURN; 5549 } 5550 5551 #elif defined(TARGET_M68K) 5552 5553 struct target_sigcontext { 5554 abi_ulong sc_mask; 5555 abi_ulong sc_usp; 5556 abi_ulong sc_d0; 5557 abi_ulong sc_d1; 5558 abi_ulong sc_a0; 5559 abi_ulong sc_a1; 5560 unsigned short sc_sr; 5561 abi_ulong sc_pc; 5562 }; 5563 5564 struct target_sigframe 5565 { 5566 abi_ulong pretcode; 5567 int sig; 5568 int code; 5569 abi_ulong psc; 5570 char retcode[8]; 5571 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5572 struct target_sigcontext sc; 5573 }; 5574 5575 typedef int target_greg_t; 5576 #define TARGET_NGREG 18 5577 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5578 5579 
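/* m68k FP state as seen through the mcontext: three control words
   (fpcr, fpsr, fpiar) followed by the eight FP data registers, each taking
   three 32-bit words as in the m68k extended-precision memory format. */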
typedef struct target_fpregset { 5580 int f_fpcntl[3]; 5581 int f_fpregs[8*3]; 5582 } target_fpregset_t; 5583 5584 struct target_mcontext { 5585 int version; 5586 target_gregset_t gregs; 5587 target_fpregset_t fpregs; 5588 }; 5589 5590 #define TARGET_MCONTEXT_VERSION 2 5591 5592 struct target_ucontext { 5593 abi_ulong tuc_flags; 5594 abi_ulong tuc_link; 5595 target_stack_t tuc_stack; 5596 struct target_mcontext tuc_mcontext; 5597 abi_long tuc_filler[80]; 5598 target_sigset_t tuc_sigmask; 5599 }; 5600 5601 struct target_rt_sigframe 5602 { 5603 abi_ulong pretcode; 5604 int sig; 5605 abi_ulong pinfo; 5606 abi_ulong puc; 5607 char retcode[8]; 5608 struct target_siginfo info; 5609 struct target_ucontext uc; 5610 }; 5611 5612 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5613 abi_ulong mask) 5614 { 5615 __put_user(mask, &sc->sc_mask); 5616 __put_user(env->aregs[7], &sc->sc_usp); 5617 __put_user(env->dregs[0], &sc->sc_d0); 5618 __put_user(env->dregs[1], &sc->sc_d1); 5619 __put_user(env->aregs[0], &sc->sc_a0); 5620 __put_user(env->aregs[1], &sc->sc_a1); 5621 __put_user(env->sr, &sc->sc_sr); 5622 __put_user(env->pc, &sc->sc_pc); 5623 } 5624 5625 static void 5626 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5627 { 5628 int temp; 5629 5630 __get_user(env->aregs[7], &sc->sc_usp); 5631 __get_user(env->dregs[0], &sc->sc_d0); 5632 __get_user(env->dregs[1], &sc->sc_d1); 5633 __get_user(env->aregs[0], &sc->sc_a0); 5634 __get_user(env->aregs[1], &sc->sc_a1); 5635 __get_user(env->pc, &sc->sc_pc); 5636 __get_user(temp, &sc->sc_sr); 5637 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5638 } 5639 5640 /* 5641 * Determine which stack to use.. 5642 */ 5643 static inline abi_ulong 5644 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5645 size_t frame_size) 5646 { 5647 unsigned long sp; 5648 5649 sp = regs->aregs[7]; 5650 5651 /* This is the X/Open sanctioned signal stack switching. */ 5652 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5653 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5654 } 5655 5656 return ((sp - frame_size) & -8UL); 5657 } 5658 5659 static void setup_frame(int sig, struct target_sigaction *ka, 5660 target_sigset_t *set, CPUM68KState *env) 5661 { 5662 struct target_sigframe *frame; 5663 abi_ulong frame_addr; 5664 abi_ulong retcode_addr; 5665 abi_ulong sc_addr; 5666 int i; 5667 5668 frame_addr = get_sigframe(ka, env, sizeof *frame); 5669 trace_user_setup_frame(env, frame_addr); 5670 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5671 goto give_sigsegv; 5672 } 5673 5674 __put_user(sig, &frame->sig); 5675 5676 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5677 __put_user(sc_addr, &frame->psc); 5678 5679 setup_sigcontext(&frame->sc, env, set->sig[0]); 5680 5681 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5682 __put_user(set->sig[i], &frame->extramask[i - 1]); 5683 } 5684 5685 /* Set up to return from userspace. 
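       pretcode points at the trampoline stored in the frame itself:
       moveq #__NR_sigreturn,%d0 followed by trap #0, packed into the single
       32-bit word written below.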
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr;
    abi_ulong retcode_addr;
    abi_ulong sc_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof *frame);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(sig, &frame->sig);

    sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
    __put_user(sc_addr, &frame->psc);

    setup_sigcontext(&frame->sc, env, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  */

    retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; trap #0 */

    __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
               (uint32_t *)(frame->retcode));

    /* Set up registers for the signal handler. */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
                                            CPUM68KState *env)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;

    __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
    __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high = env->fregs[i].d.high << 16;
        __put_user(high, &fpregs->f_fpregs[i * 3]);
        __put_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}

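/* gregs layout in the m68k mcontext: d0-d7 in slots 0-7, a0-a7 in slots
 * 8-15, the PC in slot 16 and the status register (stored here as the
 * CCR value) in slot 17.
 */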
static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
                                           CPUM68KState *env)
{
    target_greg_t *gregs = uc->tuc_mcontext.gregs;
    uint32_t sr = cpu_m68k_get_ccr(env);

    __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
    __put_user(env->dregs[0], &gregs[0]);
    __put_user(env->dregs[1], &gregs[1]);
    __put_user(env->dregs[2], &gregs[2]);
    __put_user(env->dregs[3], &gregs[3]);
    __put_user(env->dregs[4], &gregs[4]);
    __put_user(env->dregs[5], &gregs[5]);
    __put_user(env->dregs[6], &gregs[6]);
    __put_user(env->dregs[7], &gregs[7]);
    __put_user(env->aregs[0], &gregs[8]);
    __put_user(env->aregs[1], &gregs[9]);
    __put_user(env->aregs[2], &gregs[10]);
    __put_user(env->aregs[3], &gregs[11]);
    __put_user(env->aregs[4], &gregs[12]);
    __put_user(env->aregs[5], &gregs[13]);
    __put_user(env->aregs[6], &gregs[14]);
    __put_user(env->aregs[7], &gregs[15]);
    __put_user(env->pc, &gregs[16]);
    __put_user(sr, &gregs[17]);

    target_rt_save_fpu_state(uc, env);

    return 0;
}

static inline void target_rt_restore_fpu_state(CPUM68KState *env,
                                               struct target_ucontext *uc)
{
    int i;
    target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
    uint32_t fpcr;

    __get_user(fpcr, &fpregs->f_fpcntl[0]);
    cpu_m68k_set_fpcr(env, fpcr);
    __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
    /* fpiar is not emulated */

    for (i = 0; i < 8; i++) {
        uint32_t high;
        __get_user(high, &fpregs->f_fpregs[i * 3]);
        env->fregs[i].d.high = high >> 16;
        __get_user(env->fregs[i].d.low,
                   (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
    }
}

static inline int target_rt_restore_ucontext(CPUM68KState *env,
                                             struct target_ucontext *uc)
{
    int temp;
    target_greg_t *gregs = uc->tuc_mcontext.gregs;

    __get_user(temp, &uc->tuc_mcontext.version);
    if (temp != TARGET_MCONTEXT_VERSION)
        goto badframe;

    /* restore passed registers */
    __get_user(env->dregs[0], &gregs[0]);
    __get_user(env->dregs[1], &gregs[1]);
    __get_user(env->dregs[2], &gregs[2]);
    __get_user(env->dregs[3], &gregs[3]);
    __get_user(env->dregs[4], &gregs[4]);
    __get_user(env->dregs[5], &gregs[5]);
    __get_user(env->dregs[6], &gregs[6]);
    __get_user(env->dregs[7], &gregs[7]);
    __get_user(env->aregs[0], &gregs[8]);
    __get_user(env->aregs[1], &gregs[9]);
    __get_user(env->aregs[2], &gregs[10]);
    __get_user(env->aregs[3], &gregs[11]);
    __get_user(env->aregs[4], &gregs[12]);
    __get_user(env->aregs[5], &gregs[13]);
    __get_user(env->aregs[6], &gregs[14]);
    __get_user(env->aregs[7], &gregs[15]);
    __get_user(env->pc, &gregs[16]);
    __get_user(temp, &gregs[17]);
    cpu_m68k_set_ccr(env, temp);

    target_rt_restore_fpu_state(env, uc);

    return 0;

badframe:
    return 1;
}

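/* Build an RT frame: siginfo, a full ucontext (altstack state, integer and
 * FP context, signal mask) and a trampoline that invokes rt_sigreturn.
 */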
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUM68KState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    abi_ulong retcode_addr;
    abi_ulong info_addr;
    abi_ulong uc_addr;
    int err = 0;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof *frame);
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(sig, &frame->sig);

    info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
    __put_user(info_addr, &frame->pinfo);

    uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
    __put_user(uc_addr, &frame->puc);

    tswap_siginfo(&frame->info, info);

    /* Create the ucontext */

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->aregs[7]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    err |= target_rt_setup_ucontext(&frame->uc, env);

    if (err)
        goto give_sigsegv;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  */

    retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; notb d0; trap #0 */

    __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
               (uint32_t *)(frame->retcode + 0));
    __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));

    if (err)
        goto give_sigsegv;

    /* Set up registers for the signal handler. */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}

long do_sigreturn(CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* set blocked signals */

    __get_user(target_set.sig[0], &frame->sc.sc_mask);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */

    restore_sigcontext(env, &frame->sc);

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUM68KState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    /* restore registers */

    if (target_rt_restore_ucontext(env, &frame->uc))
        goto badframe;

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_ALPHA)

struct target_sigcontext {
    abi_long sc_onstack;
    abi_long sc_mask;
    abi_long sc_pc;
    abi_long sc_ps;
    abi_long sc_regs[32];
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];
    abi_ulong sc_fpcr;
    abi_ulong sc_fp_control;
    abi_ulong sc_reserved1;
    abi_ulong sc_reserved2;
    abi_ulong sc_ssize;
    abi_ulong sc_sbase;
    abi_ulong sc_traparg_a0;
    abi_ulong sc_traparg_a1;
    abi_ulong sc_traparg_a2;
    abi_ulong sc_fp_trap_pc;
    abi_ulong sc_fp_trigger_sum;
    abi_ulong sc_fp_trigger_inst;
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    abi_ulong tuc_osf_sigmask;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

struct target_sigframe {
    struct target_sigcontext sc;
    unsigned int retcode[3];
};

struct target_rt_sigframe {
    target_siginfo_t info;
    struct target_ucontext uc;
    unsigned int retcode[3];
};

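/* Trampoline used when the application supplies no sa_restorer:
 * mov $sp,$16 / ldi $0,#sigreturn / callsys.
 */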
#define INSN_MOV_R30_R16        0x47fe0410
#define INSN_LDI_R0             0x201f0000
#define INSN_CALLSYS            0x00000083

static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
                             abi_ulong frame_addr, target_sigset_t *set)
{
    int i;

    __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
    __put_user(set->sig[0], &sc->sc_mask);
    __put_user(env->pc, &sc->sc_pc);
    __put_user(8, &sc->sc_ps);

    for (i = 0; i < 31; ++i) {
        __put_user(env->ir[i], &sc->sc_regs[i]);
    }
    __put_user(0, &sc->sc_regs[31]);

    for (i = 0; i < 31; ++i) {
        __put_user(env->fir[i], &sc->sc_fpregs[i]);
    }
    __put_user(0, &sc->sc_fpregs[31]);
    __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);

    __put_user(0, &sc->sc_traparg_a0); /* FIXME */
    __put_user(0, &sc->sc_traparg_a1); /* FIXME */
    __put_user(0, &sc->sc_traparg_a2); /* FIXME */
}

static void restore_sigcontext(CPUAlphaState *env,
                               struct target_sigcontext *sc)
{
    uint64_t fpcr;
    int i;

    __get_user(env->pc, &sc->sc_pc);

    for (i = 0; i < 31; ++i) {
        __get_user(env->ir[i], &sc->sc_regs[i]);
    }
    for (i = 0; i < 31; ++i) {
        __get_user(env->fir[i], &sc->sc_fpregs[i]);
    }

    __get_user(fpcr, &sc->sc_fpcr);
    cpu_alpha_store_fpcr(env, fpcr);
}

static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUAlphaState *env,
                                     unsigned long framesize)
{
    abi_ulong sp = env->ir[IR_SP];

    /* This is the X/Open sanctioned signal stack switching. */
    if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    return (sp - framesize) & -32;
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUAlphaState *env)
{
    abi_ulong frame_addr, r26;
    struct target_sigframe *frame;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    setup_sigcontext(&frame->sc, env, frame_addr, set);

    if (ka->sa_restorer) {
        r26 = ka->sa_restorer;
    } else {
        __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
        __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
                   &frame->retcode[1]);
        __put_user(INSN_CALLSYS, &frame->retcode[2]);
        /* imb() */
        r26 = frame_addr;
    }

    unlock_user_struct(frame, frame_addr, 1);

    if (err) {
give_sigsegv:
        force_sigsegv(sig);
        return;
    }

    env->ir[IR_RA] = r26;
    env->ir[IR_PV] = env->pc = ka->_sa_handler;
    env->ir[IR_A0] = sig;
    env->ir[IR_A1] = 0;
    env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
    env->ir[IR_SP] = frame_addr;
}

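/* RT variant: the sigcontext is embedded in a ucontext, and the handler
 * additionally receives the siginfo address in $a1 and the ucontext
 * address in $a2.
 */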
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUAlphaState *env)
{
    abi_ulong frame_addr, r26;
    struct target_rt_sigframe *frame;
    int i, err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    tswap_siginfo(&frame->info, info);

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->ir[IR_SP]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
    for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    if (ka->sa_restorer) {
        r26 = ka->sa_restorer;
    } else {
        __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
        __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
                   &frame->retcode[1]);
        __put_user(INSN_CALLSYS, &frame->retcode[2]);
        /* imb(); */
        r26 = frame_addr;
    }

    if (err) {
give_sigsegv:
        force_sigsegv(sig);
        return;
    }

    env->ir[IR_RA] = r26;
    env->ir[IR_PV] = env->pc = ka->_sa_handler;
    env->ir[IR_A0] = sig;
    env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
    env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    env->ir[IR_SP] = frame_addr;
}

long do_sigreturn(CPUAlphaState *env)
{
    struct target_sigcontext *sc;
    abi_ulong sc_addr = env->ir[IR_A0];
    target_sigset_t target_set;
    sigset_t set;

    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
        goto badframe;
    }

    target_sigemptyset(&target_set);
    __get_user(target_set.sig[0], &sc->sc_mask);

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    restore_sigcontext(env, sc);
    unlock_user_struct(sc, sc_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUAlphaState *env)
{
    abi_ulong frame_addr = env->ir[IR_A0];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->ir[IR_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_TILEGX)

struct target_sigcontext {
    union {
        /* General-purpose registers. */
        abi_ulong gregs[56];
        struct {
            abi_ulong __gregs[53];
            abi_ulong tp;        /* Aliases gregs[TREG_TP]. */
            abi_ulong sp;        /* Aliases gregs[TREG_SP]. */
            abi_ulong lr;        /* Aliases gregs[TREG_LR]. */
        };
    };
    abi_ulong pc;        /* Program counter. */
    abi_ulong ics;       /* In Interrupt Critical Section? */
    abi_ulong faultnum;  /* Fault number. */
    abi_ulong pad[5];
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;   /* mask last for extensibility */
};

struct target_rt_sigframe {
    unsigned char save_area[16];   /* caller save area */
    struct target_siginfo info;
    struct target_ucontext uc;
    abi_ulong retcode[2];
};

#define INSN_MOVELI_R10_139  0x00045fe551483000ULL /* { moveli r10, 139 } */
#define INSN_SWINT1          0x286b180051485000ULL /* { swint1 } */

static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUArchState *env, int signo)
{
    int i;

    for (i = 0; i < TILEGX_R_COUNT; ++i) {
        __put_user(env->regs[i], &sc->gregs[i]);
    }

    __put_user(env->pc, &sc->pc);
    __put_user(0, &sc->ics);
    __put_user(signo, &sc->faultnum);
}

static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
{
    int i;

    for (i = 0; i < TILEGX_R_COUNT; ++i) {
        __get_user(env->regs[i], &sc->gregs[i]);
    }

    __get_user(env->pc, &sc->pc);
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
                              size_t frame_size)
{
    unsigned long sp = env->regs[TILEGX_R_SP];

    if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
        return -1UL;
    }

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp -= frame_size;
    sp &= -16UL;
    return sp;
}

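/* TILE-Gx only uses rt frames.  When no restorer is supplied, the
 * trampoline is "moveli r10, 139" (the rt_sigreturn number) followed
 * by "swint1".
 */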
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    abi_ulong frame_addr;
    struct target_rt_sigframe *frame;
    unsigned long restorer;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Always write at least the signal number for the stack backtracer. */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        /* At sigreturn time, restore the callee-save registers too. */
        tswap_siginfo(&frame->info, info);
        /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
    } else {
        __put_user(info->si_signo, &frame->info.si_signo);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        restorer = (unsigned long) ka->sa_restorer;
    } else {
        __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
        __put_user(INSN_SWINT1, &frame->retcode[1]);
        restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    }
    env->pc = (unsigned long) ka->_sa_handler;
    env->regs[TILEGX_R_SP] = frame_addr;
    env->regs[TILEGX_R_LR] = restorer;
    env->regs[0] = (unsigned long) sig;
    env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
    env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUTLGState *env)
{
    abi_ulong frame_addr = env->regs[TILEGX_R_SP];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->regs[TILEGX_R_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_HPPA)

struct target_sigcontext {
    abi_ulong sc_flags;
    abi_ulong sc_gr[32];
    uint64_t sc_fr[32];
    abi_ulong sc_iasq[2];
    abi_ulong sc_iaoq[2];
    abi_ulong sc_sar;
};

struct target_ucontext {
    abi_uint tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    abi_uint pad[1];
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

struct target_rt_sigframe {
    abi_uint tramp[9];
    target_siginfo_t info;
    struct target_ucontext uc;
    /* hidden location of upper halves of pa2.0 64-bit gregs */
};

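/* If we are interrupted inside the syscall gateway page, the faulting
 * continuation lives in %r31 rather than in IAOQ, and the
 * PARISC_SC_FLAG_IN_SYSCALL bit is recorded in sc_flags.
 */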
static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
{
    int flags = 0;
    int i;

    /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK).  */

    if (env->iaoq_f < TARGET_PAGE_SIZE) {
        /* In the gateway page, executing a syscall.  */
        flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
        __put_user(env->gr[31], &sc->sc_iaoq[0]);
        __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
    } else {
        __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
        __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
    }
    __put_user(0, &sc->sc_iasq[0]);
    __put_user(0, &sc->sc_iasq[1]);
    __put_user(flags, &sc->sc_flags);

    __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gr[i], &sc->sc_gr[i]);
    }

    __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->fr[i], &sc->sc_fr[i]);
    }

    __put_user(env->sar, &sc->sc_sar);
}

static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
{
    target_ulong psw;
    int i;

    __get_user(psw, &sc->sc_gr[0]);
    cpu_hppa_put_psw(env, psw);

    for (i = 1; i < 32; ++i) {
        __get_user(env->gr[i], &sc->sc_gr[i]);
    }
    for (i = 0; i < 32; ++i) {
        __get_user(env->fr[i], &sc->sc_fr[i]);
    }
    cpu_hppa_loaded_fr0(env);

    __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
    __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
    __get_user(env->sar, &sc->sc_sar);
}

/* No, this doesn't look right, but it's copied straight from the kernel. */
#define PARISC_RT_SIGFRAME_SIZE32 \
    ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    abi_ulong frame_addr, sp, haddr;
    struct target_rt_sigframe *frame;
    int i;

    sp = env->gr[30];
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(sp) == 0) {
            sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
        }
    }
    frame_addr = QEMU_ALIGN_UP(sp, 64);
    sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;

    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    tswap_siginfo(&frame->info, info);
    frame->uc.tuc_flags = 0;
    frame->uc.tuc_link = 0;

    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_sigcontext(&frame->uc.tuc_mcontext, env);

    __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
    __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
    __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
    __put_user(0x08000240, frame->tramp + 3); /* nop */

    unlock_user_struct(frame, frame_addr, 1);

    env->gr[2] = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    env->gr[30] = sp;
    env->gr[26] = sig;
    env->gr[25] = frame_addr + offsetof(struct target_rt_sigframe, info);
    env->gr[24] = frame_addr + offsetof(struct target_rt_sigframe, uc);

    haddr = ka->_sa_handler;
    if (haddr & 2) {
        /* Function descriptor.  */
        target_ulong *fdesc, dest;

        haddr &= -4;
        if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
            goto give_sigsegv;
        }
        __get_user(dest, fdesc);
        __get_user(env->gr[19], fdesc + 1);
        unlock_user_struct(fdesc, haddr, 1);
        haddr = dest;
    }
    env->iaoq_f = haddr;
    env->iaoq_b = haddr + 4;
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUArchState *env)
{
    abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    unlock_user_struct(frame, frame_addr, 0);

    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->gr[30]) == -EFAULT) {
        goto badframe;
    }

    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#else

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

#endif

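/* Deliver one pending guest signal: give gdb a chance to intercept it,
 * honour SIG_DFL and SIG_IGN, and otherwise compute the handler-time
 * signal mask and build the target signal frame.
 */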
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: only a few signals are ignored here; the others
           are either job control (stop) or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
    || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
    || defined(TARGET_NIOS2) || defined(TARGET_X86_64)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

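/* Scan and deliver all pending guest signals with host signals blocked.
 * The synchronous signal slot is handled first; a synchronous signal is
 * forced, so it cannot remain blocked or ignored.
 */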
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}