/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*  [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ?
              SS_ONSTACK : 0);
}

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
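 * (With a NULL set, only the current mask is copied into *oldset and the
 * function returns 0 immediately, so no signals need to be blocked and no
 * restart can be required.)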
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable.
           Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals.
 */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal (int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}

#if !(defined(TARGET_X86_64) || defined(TARGET_UNICORE32))
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
static void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
static void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64.
     */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos.
 */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG
        || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.
         */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

#if defined(TARGET_I386) && TARGET_ABI_BITS == 32

/* from the Linux kernel */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8];  /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC 0x0000

struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct sigframe {
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe {
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}

/*
 * Determine which stack to use..
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}

/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
                     frame_addr + offsetof(struct sigframe, fpstate));

    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.
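       Otherwise a tiny trampoline is written into frame->retcode below:
       byte 0x58 is "popl %eax", 0xb8 begins "movl $imm32,%eax" with the
       sigreturn syscall number as the immediate, and 0xcd 0x80 encodes
       "int $0x80"; that is why the 16-bit constants 0xb858 and 0x80cd
       are stored little-endian around the 32-bit syscall number.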
     */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}

/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.
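       Unlike the setup_frame() trampoline there is no leading "popl %eax"
       here: 0xb8 begins "movl $imm32,%eax" with the rt_sigreturn number as
       the immediate, and 0x80cd again encodes "int $0x80".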
     */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}

static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}

long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
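    /* As in do_sigreturn() above, the saved signal mask is restored first,
     * then the CPU state; any failure below falls through to badframe and
     * delivers SIGSEGV to the guest.
     */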
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}

static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
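    /* Start from the guest SP (xregs[31]); it may be switched to the
     * alternate signal stack below and is then rounded down to the
     * 16-byte alignment required of AArch64 stack pointers.
     */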
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}

static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}

#elif defined(TARGET_ARM)

struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1 {
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2 {
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1 {
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2 {
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN       (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN    (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
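 * (0x27nn is the Thumb encoding of "movs r7, #nn" and 0xdf00 is "svc 0";
 * the two halfwords are packed into a single 32-bit word in the retcodes[]
 * table defined below.)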
 */
#define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

static const abi_ulong retcodes[4] = {
    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
    SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
};


static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}

static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}

static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ?
                               ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}

static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong *)(vfpframe + 1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong *)(iwmmxtframe + 1);
}

static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.
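     * Each record written into tuc_regspace starts with a magic/size
     * header; the list is terminated by the zero word stored after the
     * last record below.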
     */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}

static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    force_sigsegv(usig);
}

static void setup_frame(int usig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *regs)
{
    if (get_osversion() >= 0x020612) {
        setup_frame_v2(usig, ka, set, regs);
    } else {
        setup_frame_v1(usig, ka, set, regs);
    }
}

/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto sigsegv;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.
*/ 1872 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 1873 1874 memset(&stack, 0, sizeof(stack)); 1875 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 1876 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 1877 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 1878 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 1879 1880 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 1881 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1882 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1883 } 1884 1885 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1886 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 1887 1888 env->regs[1] = info_addr; 1889 env->regs[2] = uc_addr; 1890 1891 unlock_user_struct(frame, frame_addr, 1); 1892 return; 1893 sigsegv: 1894 force_sigsegv(usig); 1895 } 1896 1897 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 1898 target_siginfo_t *info, 1899 target_sigset_t *set, CPUARMState *env) 1900 { 1901 struct rt_sigframe_v2 *frame; 1902 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1903 abi_ulong info_addr, uc_addr; 1904 1905 trace_user_setup_rt_frame(env, frame_addr); 1906 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 1907 goto sigsegv; 1908 } 1909 1910 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 1911 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 1912 tswap_siginfo(&frame->info, info); 1913 1914 setup_sigframe_v2(&frame->uc, set, env); 1915 1916 setup_return(env, ka, &frame->retcode, frame_addr, usig, 1917 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 1918 1919 env->regs[1] = info_addr; 1920 env->regs[2] = uc_addr; 1921 1922 unlock_user_struct(frame, frame_addr, 1); 1923 return; 1924 sigsegv: 1925 force_sigsegv(usig); 1926 } 1927 1928 static void setup_rt_frame(int usig, struct target_sigaction *ka, 1929 target_siginfo_t *info, 1930 target_sigset_t *set, CPUARMState *env) 1931 { 1932 if (get_osversion() >= 0x020612) { 1933 setup_rt_frame_v2(usig, ka, info, set, env); 1934 } else { 1935 setup_rt_frame_v1(usig, ka, info, set, env); 1936 } 1937 } 1938 1939 static int 1940 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 1941 { 1942 int err = 0; 1943 uint32_t cpsr; 1944 1945 __get_user(env->regs[0], &sc->arm_r0); 1946 __get_user(env->regs[1], &sc->arm_r1); 1947 __get_user(env->regs[2], &sc->arm_r2); 1948 __get_user(env->regs[3], &sc->arm_r3); 1949 __get_user(env->regs[4], &sc->arm_r4); 1950 __get_user(env->regs[5], &sc->arm_r5); 1951 __get_user(env->regs[6], &sc->arm_r6); 1952 __get_user(env->regs[7], &sc->arm_r7); 1953 __get_user(env->regs[8], &sc->arm_r8); 1954 __get_user(env->regs[9], &sc->arm_r9); 1955 __get_user(env->regs[10], &sc->arm_r10); 1956 __get_user(env->regs[11], &sc->arm_fp); 1957 __get_user(env->regs[12], &sc->arm_ip); 1958 __get_user(env->regs[13], &sc->arm_sp); 1959 __get_user(env->regs[14], &sc->arm_lr); 1960 __get_user(env->regs[15], &sc->arm_pc); 1961 #ifdef TARGET_CONFIG_CPU_32 1962 __get_user(cpsr, &sc->arm_cpsr); 1963 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 1964 #endif 1965 1966 err |= !valid_user_regs(env); 1967 1968 return err; 1969 } 1970 1971 static long do_sigreturn_v1(CPUARMState *env) 1972 { 1973 abi_ulong frame_addr; 1974 struct sigframe_v1 *frame = NULL; 1975 target_sigset_t set; 1976 sigset_t host_set; 1977 int i; 1978 1979 /* 1980 * Since we stacked the signal on a 64-bit boundary, 1981 * then 'sp' should be word 
aligned here. If it's 1982 * not, then the user is trying to mess with us. 1983 */ 1984 frame_addr = env->regs[13]; 1985 trace_user_do_sigreturn(env, frame_addr); 1986 if (frame_addr & 7) { 1987 goto badframe; 1988 } 1989 1990 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1991 goto badframe; 1992 } 1993 1994 __get_user(set.sig[0], &frame->sc.oldmask); 1995 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1996 __get_user(set.sig[i], &frame->extramask[i - 1]); 1997 } 1998 1999 target_to_host_sigset_internal(&host_set, &set); 2000 set_sigmask(&host_set); 2001 2002 if (restore_sigcontext(env, &frame->sc)) { 2003 goto badframe; 2004 } 2005 2006 #if 0 2007 /* Send SIGTRAP if we're single-stepping */ 2008 if (ptrace_cancel_bpt(current)) 2009 send_sig(SIGTRAP, current, 1); 2010 #endif 2011 unlock_user_struct(frame, frame_addr, 0); 2012 return -TARGET_QEMU_ESIGRETURN; 2013 2014 badframe: 2015 force_sig(TARGET_SIGSEGV); 2016 return -TARGET_QEMU_ESIGRETURN; 2017 } 2018 2019 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 2020 { 2021 int i; 2022 abi_ulong magic, sz; 2023 uint32_t fpscr, fpexc; 2024 struct target_vfp_sigframe *vfpframe; 2025 vfpframe = (struct target_vfp_sigframe *)regspace; 2026 2027 __get_user(magic, &vfpframe->magic); 2028 __get_user(sz, &vfpframe->size); 2029 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 2030 return 0; 2031 } 2032 for (i = 0; i < 32; i++) { 2033 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]); 2034 } 2035 __get_user(fpscr, &vfpframe->ufp.fpscr); 2036 vfp_set_fpscr(env, fpscr); 2037 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 2038 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 2039 * and the exception flag is cleared 2040 */ 2041 fpexc |= (1 << 30); 2042 fpexc &= ~((1 << 31) | (1 << 28)); 2043 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2044 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2045 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2046 return (abi_ulong*)(vfpframe + 1); 2047 } 2048 2049 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2050 abi_ulong *regspace) 2051 { 2052 int i; 2053 abi_ulong magic, sz; 2054 struct target_iwmmxt_sigframe *iwmmxtframe; 2055 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2056 2057 __get_user(magic, &iwmmxtframe->magic); 2058 __get_user(sz, &iwmmxtframe->size); 2059 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2060 return 0; 2061 } 2062 for (i = 0; i < 16; i++) { 2063 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2064 } 2065 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2066 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf); 2067 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2068 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2069 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2070 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2071 return (abi_ulong*)(iwmmxtframe + 1); 2072 } 2073 2074 static int do_sigframe_return_v2(CPUARMState *env, 2075 target_ulong context_addr, 2076 struct target_ucontext_v2 *uc) 2077 { 2078 sigset_t host_set; 2079 abi_ulong *regspace; 2080 2081 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2082 set_sigmask(&host_set); 2083 2084 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2085 return 1; 2086 2087 /* Restore coprocessor signal frame */ 2088 regspace = uc->tuc_regspace; 2089 if (arm_feature(env,
ARM_FEATURE_VFP)) { 2090 regspace = restore_sigframe_v2_vfp(env, regspace); 2091 if (!regspace) { 2092 return 1; 2093 } 2094 } 2095 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2096 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2097 if (!regspace) { 2098 return 1; 2099 } 2100 } 2101 2102 if (do_sigaltstack(context_addr 2103 + offsetof(struct target_ucontext_v2, tuc_stack), 2104 0, get_sp_from_cpustate(env)) == -EFAULT) { 2105 return 1; 2106 } 2107 2108 #if 0 2109 /* Send SIGTRAP if we're single-stepping */ 2110 if (ptrace_cancel_bpt(current)) 2111 send_sig(SIGTRAP, current, 1); 2112 #endif 2113 2114 return 0; 2115 } 2116 2117 static long do_sigreturn_v2(CPUARMState *env) 2118 { 2119 abi_ulong frame_addr; 2120 struct sigframe_v2 *frame = NULL; 2121 2122 /* 2123 * Since we stacked the signal on a 64-bit boundary, 2124 * then 'sp' should be word aligned here. If it's 2125 * not, then the user is trying to mess with us. 2126 */ 2127 frame_addr = env->regs[13]; 2128 trace_user_do_sigreturn(env, frame_addr); 2129 if (frame_addr & 7) { 2130 goto badframe; 2131 } 2132 2133 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2134 goto badframe; 2135 } 2136 2137 if (do_sigframe_return_v2(env, 2138 frame_addr 2139 + offsetof(struct sigframe_v2, uc), 2140 &frame->uc)) { 2141 goto badframe; 2142 } 2143 2144 unlock_user_struct(frame, frame_addr, 0); 2145 return -TARGET_QEMU_ESIGRETURN; 2146 2147 badframe: 2148 unlock_user_struct(frame, frame_addr, 0); 2149 force_sig(TARGET_SIGSEGV); 2150 return -TARGET_QEMU_ESIGRETURN; 2151 } 2152 2153 long do_sigreturn(CPUARMState *env) 2154 { 2155 if (get_osversion() >= 0x020612) { 2156 return do_sigreturn_v2(env); 2157 } else { 2158 return do_sigreturn_v1(env); 2159 } 2160 } 2161 2162 static long do_rt_sigreturn_v1(CPUARMState *env) 2163 { 2164 abi_ulong frame_addr; 2165 struct rt_sigframe_v1 *frame = NULL; 2166 sigset_t host_set; 2167 2168 /* 2169 * Since we stacked the signal on a 64-bit boundary, 2170 * then 'sp' should be word aligned here. If it's 2171 * not, then the user is trying to mess with us. 2172 */ 2173 frame_addr = env->regs[13]; 2174 trace_user_do_rt_sigreturn(env, frame_addr); 2175 if (frame_addr & 7) { 2176 goto badframe; 2177 } 2178 2179 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2180 goto badframe; 2181 } 2182 2183 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2184 set_sigmask(&host_set); 2185 2186 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2187 goto badframe; 2188 } 2189 2190 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2191 goto badframe; 2192 2193 #if 0 2194 /* Send SIGTRAP if we're single-stepping */ 2195 if (ptrace_cancel_bpt(current)) 2196 send_sig(SIGTRAP, current, 1); 2197 #endif 2198 unlock_user_struct(frame, frame_addr, 0); 2199 return -TARGET_QEMU_ESIGRETURN; 2200 2201 badframe: 2202 unlock_user_struct(frame, frame_addr, 0); 2203 force_sig(TARGET_SIGSEGV); 2204 return -TARGET_QEMU_ESIGRETURN; 2205 } 2206 2207 static long do_rt_sigreturn_v2(CPUARMState *env) 2208 { 2209 abi_ulong frame_addr; 2210 struct rt_sigframe_v2 *frame = NULL; 2211 2212 /* 2213 * Since we stacked the signal on a 64-bit boundary, 2214 * then 'sp' should be word aligned here. If it's 2215 * not, then the user is trying to mess with us. 
2216 */ 2217 frame_addr = env->regs[13]; 2218 trace_user_do_rt_sigreturn(env, frame_addr); 2219 if (frame_addr & 7) { 2220 goto badframe; 2221 } 2222 2223 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2224 goto badframe; 2225 } 2226 2227 if (do_sigframe_return_v2(env, 2228 frame_addr 2229 + offsetof(struct rt_sigframe_v2, uc), 2230 &frame->uc)) { 2231 goto badframe; 2232 } 2233 2234 unlock_user_struct(frame, frame_addr, 0); 2235 return -TARGET_QEMU_ESIGRETURN; 2236 2237 badframe: 2238 unlock_user_struct(frame, frame_addr, 0); 2239 force_sig(TARGET_SIGSEGV); 2240 return -TARGET_QEMU_ESIGRETURN; 2241 } 2242 2243 long do_rt_sigreturn(CPUARMState *env) 2244 { 2245 if (get_osversion() >= 0x020612) { 2246 return do_rt_sigreturn_v2(env); 2247 } else { 2248 return do_rt_sigreturn_v1(env); 2249 } 2250 } 2251 2252 #elif defined(TARGET_SPARC) 2253 2254 #define __SUNOS_MAXWIN 31 2255 2256 /* This is what SunOS does, so shall I. */ 2257 struct target_sigcontext { 2258 abi_ulong sigc_onstack; /* state to restore */ 2259 2260 abi_ulong sigc_mask; /* sigmask to restore */ 2261 abi_ulong sigc_sp; /* stack pointer */ 2262 abi_ulong sigc_pc; /* program counter */ 2263 abi_ulong sigc_npc; /* next program counter */ 2264 abi_ulong sigc_psr; /* for condition codes etc */ 2265 abi_ulong sigc_g1; /* User uses these two registers */ 2266 abi_ulong sigc_o0; /* within the trampoline code. */ 2267 2268 /* Now comes information regarding the users window set 2269 * at the time of the signal. 2270 */ 2271 abi_ulong sigc_oswins; /* outstanding windows */ 2272 2273 /* stack ptrs for each regwin buf */ 2274 char *sigc_spbuf[__SUNOS_MAXWIN]; 2275 2276 /* Windows to restore after signal */ 2277 struct { 2278 abi_ulong locals[8]; 2279 abi_ulong ins[8]; 2280 } sigc_wbuf[__SUNOS_MAXWIN]; 2281 }; 2282 /* A Sparc stack frame */ 2283 struct sparc_stackf { 2284 abi_ulong locals[8]; 2285 abi_ulong ins[8]; 2286 /* It's simpler to treat fp and callers_pc as elements of ins[] 2287 * since we never need to access them ourselves. 
2288 */ 2289 char *structptr; 2290 abi_ulong xargs[6]; 2291 abi_ulong xxargs[1]; 2292 }; 2293 2294 typedef struct { 2295 struct { 2296 abi_ulong psr; 2297 abi_ulong pc; 2298 abi_ulong npc; 2299 abi_ulong y; 2300 abi_ulong u_regs[16]; /* globals and ins */ 2301 } si_regs; 2302 int si_mask; 2303 } __siginfo_t; 2304 2305 typedef struct { 2306 abi_ulong si_float_regs[32]; 2307 unsigned long si_fsr; 2308 unsigned long si_fpqdepth; 2309 struct { 2310 unsigned long *insn_addr; 2311 unsigned long insn; 2312 } si_fpqueue [16]; 2313 } qemu_siginfo_fpu_t; 2314 2315 2316 struct target_signal_frame { 2317 struct sparc_stackf ss; 2318 __siginfo_t info; 2319 abi_ulong fpu_save; 2320 abi_ulong insns[2] __attribute__ ((aligned (8))); 2321 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2322 abi_ulong extra_size; /* Should be 0 */ 2323 qemu_siginfo_fpu_t fpu_state; 2324 }; 2325 struct target_rt_signal_frame { 2326 struct sparc_stackf ss; 2327 siginfo_t info; 2328 abi_ulong regs[20]; 2329 sigset_t mask; 2330 abi_ulong fpu_save; 2331 unsigned int insns[2]; 2332 stack_t stack; 2333 unsigned int extra_size; /* Should be 0 */ 2334 qemu_siginfo_fpu_t fpu_state; 2335 }; 2336 2337 #define UREG_O0 16 2338 #define UREG_O6 22 2339 #define UREG_I0 0 2340 #define UREG_I1 1 2341 #define UREG_I2 2 2342 #define UREG_I3 3 2343 #define UREG_I4 4 2344 #define UREG_I5 5 2345 #define UREG_I6 6 2346 #define UREG_I7 7 2347 #define UREG_L0 8 2348 #define UREG_FP UREG_I6 2349 #define UREG_SP UREG_O6 2350 2351 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2352 CPUSPARCState *env, 2353 unsigned long framesize) 2354 { 2355 abi_ulong sp; 2356 2357 sp = env->regwptr[UREG_FP]; 2358 2359 /* This is the X/Open sanctioned signal stack switching. */ 2360 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2361 if (!on_sig_stack(sp) 2362 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2363 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2364 } 2365 } 2366 return sp - framesize; 2367 } 2368 2369 static int 2370 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2371 { 2372 int err = 0, i; 2373 2374 __put_user(env->psr, &si->si_regs.psr); 2375 __put_user(env->pc, &si->si_regs.pc); 2376 __put_user(env->npc, &si->si_regs.npc); 2377 __put_user(env->y, &si->si_regs.y); 2378 for (i=0; i < 8; i++) { 2379 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2380 } 2381 for (i=0; i < 8; i++) { 2382 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2383 } 2384 __put_user(mask, &si->si_mask); 2385 return err; 2386 } 2387 2388 #if 0 2389 static int 2390 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2391 CPUSPARCState *env, unsigned long mask) 2392 { 2393 int err = 0; 2394 2395 __put_user(mask, &sc->sigc_mask); 2396 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2397 __put_user(env->pc, &sc->sigc_pc); 2398 __put_user(env->npc, &sc->sigc_npc); 2399 __put_user(env->psr, &sc->sigc_psr); 2400 __put_user(env->gregs[1], &sc->sigc_g1); 2401 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2402 2403 return err; 2404 } 2405 #endif 2406 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2407 2408 static void setup_frame(int sig, struct target_sigaction *ka, 2409 target_sigset_t *set, CPUSPARCState *env) 2410 { 2411 abi_ulong sf_addr; 2412 struct target_signal_frame *sf; 2413 int sigframe_size, err, i; 2414 2415 /* 1. 
Make sure everything is clean */ 2416 //synchronize_user_stack(); 2417 2418 sigframe_size = NF_ALIGNEDSZ; 2419 sf_addr = get_sigframe(ka, env, sigframe_size); 2420 trace_user_setup_frame(env, sf_addr); 2421 2422 sf = lock_user(VERIFY_WRITE, sf_addr, 2423 sizeof(struct target_signal_frame), 0); 2424 if (!sf) { 2425 goto sigsegv; 2426 } 2427 #if 0 2428 if (invalid_frame_pointer(sf, sigframe_size)) 2429 goto sigill_and_return; 2430 #endif 2431 /* 2. Save the current process state */ 2432 err = setup___siginfo(&sf->info, env, set->sig[0]); 2433 __put_user(0, &sf->extra_size); 2434 2435 //save_fpu_state(regs, &sf->fpu_state); 2436 //__put_user(&sf->fpu_state, &sf->fpu_save); 2437 2438 __put_user(set->sig[0], &sf->info.si_mask); 2439 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2440 __put_user(set->sig[i + 1], &sf->extramask[i]); 2441 } 2442 2443 for (i = 0; i < 8; i++) { 2444 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2445 } 2446 for (i = 0; i < 8; i++) { 2447 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2448 } 2449 if (err) 2450 goto sigsegv; 2451 2452 /* 3. signal handler back-trampoline and parameters */ 2453 env->regwptr[UREG_FP] = sf_addr; 2454 env->regwptr[UREG_I0] = sig; 2455 env->regwptr[UREG_I1] = sf_addr + 2456 offsetof(struct target_signal_frame, info); 2457 env->regwptr[UREG_I2] = sf_addr + 2458 offsetof(struct target_signal_frame, info); 2459 2460 /* 4. signal handler */ 2461 env->pc = ka->_sa_handler; 2462 env->npc = (env->pc + 4); 2463 /* 5. return to kernel instructions */ 2464 if (ka->sa_restorer) { 2465 env->regwptr[UREG_I7] = ka->sa_restorer; 2466 } else { 2467 uint32_t val32; 2468 2469 env->regwptr[UREG_I7] = sf_addr + 2470 offsetof(struct target_signal_frame, insns) - 2 * 4; 2471 2472 /* mov __NR_sigreturn, %g1 */ 2473 val32 = 0x821020d8; 2474 __put_user(val32, &sf->insns[0]); 2475 2476 /* t 0x10 */ 2477 val32 = 0x91d02010; 2478 __put_user(val32, &sf->insns[1]); 2479 if (err) 2480 goto sigsegv; 2481 2482 /* Flush instruction space. */ 2483 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2484 // tb_flush(env); 2485 } 2486 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2487 return; 2488 #if 0 2489 sigill_and_return: 2490 force_sig(TARGET_SIGILL); 2491 #endif 2492 sigsegv: 2493 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2494 force_sigsegv(sig); 2495 } 2496 2497 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2498 target_siginfo_t *info, 2499 target_sigset_t *set, CPUSPARCState *env) 2500 { 2501 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2502 } 2503 2504 long do_sigreturn(CPUSPARCState *env) 2505 { 2506 abi_ulong sf_addr; 2507 struct target_signal_frame *sf; 2508 uint32_t up_psr, pc, npc; 2509 target_sigset_t set; 2510 sigset_t host_set; 2511 int err=0, i; 2512 2513 sf_addr = env->regwptr[UREG_FP]; 2514 trace_user_do_sigreturn(env, sf_addr); 2515 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2516 goto segv_and_exit; 2517 } 2518 2519 /* 1. Make sure we are not getting garbage from the user */ 2520 2521 if (sf_addr & 3) 2522 goto segv_and_exit; 2523 2524 __get_user(pc, &sf->info.si_regs.pc); 2525 __get_user(npc, &sf->info.si_regs.npc); 2526 2527 if ((pc | npc) & 3) { 2528 goto segv_and_exit; 2529 } 2530 2531 /* 2. Restore the state */ 2532 __get_user(up_psr, &sf->info.si_regs.psr); 2533 2534 /* User can only change condition codes and FPU enabling in %psr. 
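 * Accordingly, only the PSR_ICC (condition code) bits are taken from the saved value below; PSR_EF handling is commented out.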
*/ 2535 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 2536 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 2537 2538 env->pc = pc; 2539 env->npc = npc; 2540 __get_user(env->y, &sf->info.si_regs.y); 2541 for (i=0; i < 8; i++) { 2542 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 2543 } 2544 for (i=0; i < 8; i++) { 2545 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 2546 } 2547 2548 /* FIXME: implement FPU save/restore: 2549 * __get_user(fpu_save, &sf->fpu_save); 2550 * if (fpu_save) 2551 * err |= restore_fpu_state(env, fpu_save); 2552 */ 2553 2554 /* This is pretty much atomic, no amount locking would prevent 2555 * the races which exist anyways. 2556 */ 2557 __get_user(set.sig[0], &sf->info.si_mask); 2558 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2559 __get_user(set.sig[i], &sf->extramask[i - 1]); 2560 } 2561 2562 target_to_host_sigset_internal(&host_set, &set); 2563 set_sigmask(&host_set); 2564 2565 if (err) { 2566 goto segv_and_exit; 2567 } 2568 unlock_user_struct(sf, sf_addr, 0); 2569 return -TARGET_QEMU_ESIGRETURN; 2570 2571 segv_and_exit: 2572 unlock_user_struct(sf, sf_addr, 0); 2573 force_sig(TARGET_SIGSEGV); 2574 return -TARGET_QEMU_ESIGRETURN; 2575 } 2576 2577 long do_rt_sigreturn(CPUSPARCState *env) 2578 { 2579 trace_user_do_rt_sigreturn(env, 0); 2580 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 2581 return -TARGET_ENOSYS; 2582 } 2583 2584 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 2585 #define MC_TSTATE 0 2586 #define MC_PC 1 2587 #define MC_NPC 2 2588 #define MC_Y 3 2589 #define MC_G1 4 2590 #define MC_G2 5 2591 #define MC_G3 6 2592 #define MC_G4 7 2593 #define MC_G5 8 2594 #define MC_G6 9 2595 #define MC_G7 10 2596 #define MC_O0 11 2597 #define MC_O1 12 2598 #define MC_O2 13 2599 #define MC_O3 14 2600 #define MC_O4 15 2601 #define MC_O5 16 2602 #define MC_O6 17 2603 #define MC_O7 18 2604 #define MC_NGREG 19 2605 2606 typedef abi_ulong target_mc_greg_t; 2607 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG]; 2608 2609 struct target_mc_fq { 2610 abi_ulong *mcfq_addr; 2611 uint32_t mcfq_insn; 2612 }; 2613 2614 struct target_mc_fpu { 2615 union { 2616 uint32_t sregs[32]; 2617 uint64_t dregs[32]; 2618 //uint128_t qregs[16]; 2619 } mcfpu_fregs; 2620 abi_ulong mcfpu_fsr; 2621 abi_ulong mcfpu_fprs; 2622 abi_ulong mcfpu_gsr; 2623 struct target_mc_fq *mcfpu_fq; 2624 unsigned char mcfpu_qcnt; 2625 unsigned char mcfpu_qentsz; 2626 unsigned char mcfpu_enab; 2627 }; 2628 typedef struct target_mc_fpu target_mc_fpu_t; 2629 2630 typedef struct { 2631 target_mc_gregset_t mc_gregs; 2632 target_mc_greg_t mc_fp; 2633 target_mc_greg_t mc_i7; 2634 target_mc_fpu_t mc_fpregs; 2635 } target_mcontext_t; 2636 2637 struct target_ucontext { 2638 struct target_ucontext *tuc_link; 2639 abi_ulong tuc_flags; 2640 target_sigset_t tuc_sigmask; 2641 target_mcontext_t tuc_mcontext; 2642 }; 2643 2644 /* A V9 register window */ 2645 struct target_reg_window { 2646 abi_ulong locals[8]; 2647 abi_ulong ins[8]; 2648 }; 2649 2650 #define TARGET_STACK_BIAS 2047 2651 2652 /* {set, get}context() needed for 64-bit SparcLinux userland. 
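 * They are not entered as normal syscalls, so there is no way to report an error back to the caller; a bad user context is answered with SIGSEGV instead.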
*/ 2653 void sparc64_set_context(CPUSPARCState *env) 2654 { 2655 abi_ulong ucp_addr; 2656 struct target_ucontext *ucp; 2657 target_mc_gregset_t *grp; 2658 abi_ulong pc, npc, tstate; 2659 abi_ulong fp, i7, w_addr; 2660 unsigned int i; 2661 2662 ucp_addr = env->regwptr[UREG_I0]; 2663 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 2664 goto do_sigsegv; 2665 } 2666 grp = &ucp->tuc_mcontext.mc_gregs; 2667 __get_user(pc, &((*grp)[MC_PC])); 2668 __get_user(npc, &((*grp)[MC_NPC])); 2669 if ((pc | npc) & 3) { 2670 goto do_sigsegv; 2671 } 2672 if (env->regwptr[UREG_I1]) { 2673 target_sigset_t target_set; 2674 sigset_t set; 2675 2676 if (TARGET_NSIG_WORDS == 1) { 2677 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 2678 } else { 2679 abi_ulong *src, *dst; 2680 src = ucp->tuc_sigmask.sig; 2681 dst = target_set.sig; 2682 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2683 __get_user(*dst, src); 2684 } 2685 } 2686 target_to_host_sigset_internal(&set, &target_set); 2687 set_sigmask(&set); 2688 } 2689 env->pc = pc; 2690 env->npc = npc; 2691 __get_user(env->y, &((*grp)[MC_Y])); 2692 __get_user(tstate, &((*grp)[MC_TSTATE])); 2693 env->asi = (tstate >> 24) & 0xff; 2694 cpu_put_ccr(env, tstate >> 32); 2695 cpu_put_cwp64(env, tstate & 0x1f); 2696 __get_user(env->gregs[1], (&(*grp)[MC_G1])); 2697 __get_user(env->gregs[2], (&(*grp)[MC_G2])); 2698 __get_user(env->gregs[3], (&(*grp)[MC_G3])); 2699 __get_user(env->gregs[4], (&(*grp)[MC_G4])); 2700 __get_user(env->gregs[5], (&(*grp)[MC_G5])); 2701 __get_user(env->gregs[6], (&(*grp)[MC_G6])); 2702 __get_user(env->gregs[7], (&(*grp)[MC_G7])); 2703 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); 2704 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); 2705 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); 2706 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); 2707 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); 2708 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); 2709 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); 2710 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); 2711 2712 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 2713 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 2714 2715 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2716 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2717 abi_ulong) != 0) { 2718 goto do_sigsegv; 2719 } 2720 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2721 abi_ulong) != 0) { 2722 goto do_sigsegv; 2723 } 2724 /* FIXME this does not match how the kernel handles the FPU in 2725 * its sparc64_set_context implementation. 
In particular the FPU 2726 * is only restored if fenab is non-zero in: 2727 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 2728 */ 2729 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 2730 { 2731 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2732 for (i = 0; i < 64; i++, src++) { 2733 if (i & 1) { 2734 __get_user(env->fpr[i/2].l.lower, src); 2735 } else { 2736 __get_user(env->fpr[i/2].l.upper, src); 2737 } 2738 } 2739 } 2740 __get_user(env->fsr, 2741 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 2742 __get_user(env->gsr, 2743 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 2744 unlock_user_struct(ucp, ucp_addr, 0); 2745 return; 2746 do_sigsegv: 2747 unlock_user_struct(ucp, ucp_addr, 0); 2748 force_sig(TARGET_SIGSEGV); 2749 } 2750 2751 void sparc64_get_context(CPUSPARCState *env) 2752 { 2753 abi_ulong ucp_addr; 2754 struct target_ucontext *ucp; 2755 target_mc_gregset_t *grp; 2756 target_mcontext_t *mcp; 2757 abi_ulong fp, i7, w_addr; 2758 int err; 2759 unsigned int i; 2760 target_sigset_t target_set; 2761 sigset_t set; 2762 2763 ucp_addr = env->regwptr[UREG_I0]; 2764 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 2765 goto do_sigsegv; 2766 } 2767 2768 mcp = &ucp->tuc_mcontext; 2769 grp = &mcp->mc_gregs; 2770 2771 /* Skip over the trap instruction, first. */ 2772 env->pc = env->npc; 2773 env->npc += 4; 2774 2775 /* If we're only reading the signal mask then do_sigprocmask() 2776 * is guaranteed not to fail, which is important because we don't 2777 * have any way to signal a failure or restart this operation since 2778 * this is not a normal syscall. 2779 */ 2780 err = do_sigprocmask(0, NULL, &set); 2781 assert(err == 0); 2782 host_to_target_sigset_internal(&target_set, &set); 2783 if (TARGET_NSIG_WORDS == 1) { 2784 __put_user(target_set.sig[0], 2785 (abi_ulong *)&ucp->tuc_sigmask); 2786 } else { 2787 abi_ulong *src, *dst; 2788 src = target_set.sig; 2789 dst = ucp->tuc_sigmask.sig; 2790 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 2791 __put_user(*src, dst); 2792 } 2793 if (err) 2794 goto do_sigsegv; 2795 } 2796 2797 /* XXX: tstate must be saved properly */ 2798 // __put_user(env->tstate, &((*grp)[MC_TSTATE])); 2799 __put_user(env->pc, &((*grp)[MC_PC])); 2800 __put_user(env->npc, &((*grp)[MC_NPC])); 2801 __put_user(env->y, &((*grp)[MC_Y])); 2802 __put_user(env->gregs[1], &((*grp)[MC_G1])); 2803 __put_user(env->gregs[2], &((*grp)[MC_G2])); 2804 __put_user(env->gregs[3], &((*grp)[MC_G3])); 2805 __put_user(env->gregs[4], &((*grp)[MC_G4])); 2806 __put_user(env->gregs[5], &((*grp)[MC_G5])); 2807 __put_user(env->gregs[6], &((*grp)[MC_G6])); 2808 __put_user(env->gregs[7], &((*grp)[MC_G7])); 2809 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); 2810 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); 2811 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); 2812 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); 2813 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); 2814 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); 2815 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); 2816 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); 2817 2818 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 2819 fp = i7 = 0; 2820 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 2821 abi_ulong) != 0) { 2822 goto do_sigsegv; 2823 } 2824 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 2825 abi_ulong) != 0) { 2826 goto do_sigsegv; 2827 } 2828 __put_user(fp, &(mcp->mc_fp)); 2829 __put_user(i7, 
&(mcp->mc_i7)); 2830 2831 { 2832 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 2833 for (i = 0; i < 64; i++, dst++) { 2834 if (i & 1) { 2835 __put_user(env->fpr[i/2].l.lower, dst); 2836 } else { 2837 __put_user(env->fpr[i/2].l.upper, dst); 2838 } 2839 } 2840 } 2841 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 2842 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 2843 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 2844 2845 if (err) 2846 goto do_sigsegv; 2847 unlock_user_struct(ucp, ucp_addr, 1); 2848 return; 2849 do_sigsegv: 2850 unlock_user_struct(ucp, ucp_addr, 1); 2851 force_sig(TARGET_SIGSEGV); 2852 } 2853 #endif 2854 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 2855 2856 # if defined(TARGET_ABI_MIPSO32) 2857 struct target_sigcontext { 2858 uint32_t sc_regmask; /* Unused */ 2859 uint32_t sc_status; 2860 uint64_t sc_pc; 2861 uint64_t sc_regs[32]; 2862 uint64_t sc_fpregs[32]; 2863 uint32_t sc_ownedfp; /* Unused */ 2864 uint32_t sc_fpc_csr; 2865 uint32_t sc_fpc_eir; /* Unused */ 2866 uint32_t sc_used_math; 2867 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 2868 uint32_t pad0; 2869 uint64_t sc_mdhi; 2870 uint64_t sc_mdlo; 2871 target_ulong sc_hi1; /* Was sc_cause */ 2872 target_ulong sc_lo1; /* Was sc_badvaddr */ 2873 target_ulong sc_hi2; /* Was sc_sigset[4] */ 2874 target_ulong sc_lo2; 2875 target_ulong sc_hi3; 2876 target_ulong sc_lo3; 2877 }; 2878 # else /* N32 || N64 */ 2879 struct target_sigcontext { 2880 uint64_t sc_regs[32]; 2881 uint64_t sc_fpregs[32]; 2882 uint64_t sc_mdhi; 2883 uint64_t sc_hi1; 2884 uint64_t sc_hi2; 2885 uint64_t sc_hi3; 2886 uint64_t sc_mdlo; 2887 uint64_t sc_lo1; 2888 uint64_t sc_lo2; 2889 uint64_t sc_lo3; 2890 uint64_t sc_pc; 2891 uint32_t sc_fpc_csr; 2892 uint32_t sc_used_math; 2893 uint32_t sc_dsp; 2894 uint32_t sc_reserved; 2895 }; 2896 # endif /* O32 */ 2897 2898 struct sigframe { 2899 uint32_t sf_ass[4]; /* argument save space for o32 */ 2900 uint32_t sf_code[2]; /* signal trampoline */ 2901 struct target_sigcontext sf_sc; 2902 target_sigset_t sf_mask; 2903 }; 2904 2905 struct target_ucontext { 2906 target_ulong tuc_flags; 2907 target_ulong tuc_link; 2908 target_stack_t tuc_stack; 2909 target_ulong pad0; 2910 struct target_sigcontext tuc_mcontext; 2911 target_sigset_t tuc_sigmask; 2912 }; 2913 2914 struct target_rt_sigframe { 2915 uint32_t rs_ass[4]; /* argument save space for o32 */ 2916 uint32_t rs_code[2]; /* signal trampoline */ 2917 struct target_siginfo rs_info; 2918 struct target_ucontext rs_uc; 2919 }; 2920 2921 /* Install trampoline to jump back from signal handler */ 2922 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 2923 { 2924 int err = 0; 2925 2926 /* 2927 * Set up the return code ... 2928 * 2929 * li v0, __NR__foo_sigreturn 2930 * syscall 2931 */ 2932 2933 __put_user(0x24020000 + syscall, tramp + 0); 2934 __put_user(0x0000000c , tramp + 1); 2935 return err; 2936 } 2937 2938 static inline void setup_sigcontext(CPUMIPSState *regs, 2939 struct target_sigcontext *sc) 2940 { 2941 int i; 2942 2943 __put_user(exception_resume_pc(regs), &sc->sc_pc); 2944 regs->hflags &= ~MIPS_HFLAG_BMASK; 2945 2946 __put_user(0, &sc->sc_regs[0]); 2947 for (i = 1; i < 32; ++i) { 2948 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2949 } 2950 2951 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2952 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2953 2954 /* Rather than checking for dsp existence, always copy. The storage 2955 would just be garbage otherwise. 
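HI[1..3] and LO[1..3] below are the DSP ASE accumulator pairs, and sc_dsp receives the DSPControl state read via cpu_rddsp().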
*/ 2956 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 2957 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 2958 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 2959 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 2960 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 2961 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 2962 { 2963 uint32_t dsp = cpu_rddsp(0x3ff, regs); 2964 __put_user(dsp, &sc->sc_dsp); 2965 } 2966 2967 __put_user(1, &sc->sc_used_math); 2968 2969 for (i = 0; i < 32; ++i) { 2970 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 2971 } 2972 } 2973 2974 static inline void 2975 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 2976 { 2977 int i; 2978 2979 __get_user(regs->CP0_EPC, &sc->sc_pc); 2980 2981 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 2982 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 2983 2984 for (i = 1; i < 32; ++i) { 2985 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 2986 } 2987 2988 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 2989 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 2990 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 2991 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 2992 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 2993 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 2994 { 2995 uint32_t dsp; 2996 __get_user(dsp, &sc->sc_dsp); 2997 cpu_wrdsp(dsp, 0x3ff, regs); 2998 } 2999 3000 for (i = 0; i < 32; ++i) { 3001 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 3002 } 3003 } 3004 3005 /* 3006 * Determine which stack to use.. 3007 */ 3008 static inline abi_ulong 3009 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 3010 { 3011 unsigned long sp; 3012 3013 /* Default to using normal stack */ 3014 sp = regs->active_tc.gpr[29]; 3015 3016 /* 3017 * FPU emulator may have its own trampoline active just 3018 * above the user stack, 16-bytes before the next lowest 3019 * 16 byte boundary. Try to avoid trashing it. 3020 */ 3021 sp -= 32; 3022 3023 /* This is the X/Open sanctioned signal stack switching. */ 3024 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 3025 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3026 } 3027 3028 return (sp - frame_size) & ~7; 3029 } 3030 3031 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3032 { 3033 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3034 env->hflags &= ~MIPS_HFLAG_M16; 3035 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3036 env->active_tc.PC &= ~(target_ulong) 1; 3037 } 3038 } 3039 3040 # if defined(TARGET_ABI_MIPSO32) 3041 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3042 static void setup_frame(int sig, struct target_sigaction * ka, 3043 target_sigset_t *set, CPUMIPSState *regs) 3044 { 3045 struct sigframe *frame; 3046 abi_ulong frame_addr; 3047 int i; 3048 3049 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3050 trace_user_setup_frame(regs, frame_addr); 3051 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3052 goto give_sigsegv; 3053 } 3054 3055 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3056 3057 setup_sigcontext(regs, &frame->sf_sc); 3058 3059 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3060 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3061 } 3062 3063 /* 3064 * Arguments to signal handler: 3065 * 3066 * a0 = signal number 3067 * a1 = 0 (should be cause) 3068 * a2 = pointer to struct sigcontext 3069 * 3070 * $25 and PC point to the signal handler, $29 points to the 3071 * struct sigframe. 
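 * $31 (ra) is pointed at the sigreturn trampoline written into sf_code.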
3072 */ 3073 regs->active_tc.gpr[ 4] = sig; 3074 regs->active_tc.gpr[ 5] = 0; 3075 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3076 regs->active_tc.gpr[29] = frame_addr; 3077 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3078 /* The original kernel code sets CP0_EPC to the handler 3079 * since it returns to userland using eret 3080 * we cannot do this here, and we must set PC directly */ 3081 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3082 mips_set_hflags_isa_mode_from_pc(regs); 3083 unlock_user_struct(frame, frame_addr, 1); 3084 return; 3085 3086 give_sigsegv: 3087 force_sigsegv(sig); 3088 } 3089 3090 long do_sigreturn(CPUMIPSState *regs) 3091 { 3092 struct sigframe *frame; 3093 abi_ulong frame_addr; 3094 sigset_t blocked; 3095 target_sigset_t target_set; 3096 int i; 3097 3098 frame_addr = regs->active_tc.gpr[29]; 3099 trace_user_do_sigreturn(regs, frame_addr); 3100 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3101 goto badframe; 3102 3103 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3104 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3105 } 3106 3107 target_to_host_sigset_internal(&blocked, &target_set); 3108 set_sigmask(&blocked); 3109 3110 restore_sigcontext(regs, &frame->sf_sc); 3111 3112 #if 0 3113 /* 3114 * Don't let your children do this ... 3115 */ 3116 __asm__ __volatile__( 3117 "move\t$29, %0\n\t" 3118 "j\tsyscall_exit" 3119 :/* no outputs */ 3120 :"r" (&regs)); 3121 /* Unreached */ 3122 #endif 3123 3124 regs->active_tc.PC = regs->CP0_EPC; 3125 mips_set_hflags_isa_mode_from_pc(regs); 3126 /* I am not sure this is right, but it seems to work 3127 * maybe a problem with nested signals ? */ 3128 regs->CP0_EPC = 0; 3129 return -TARGET_QEMU_ESIGRETURN; 3130 3131 badframe: 3132 force_sig(TARGET_SIGSEGV); 3133 return -TARGET_QEMU_ESIGRETURN; 3134 } 3135 # endif /* O32 */ 3136 3137 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3138 target_siginfo_t *info, 3139 target_sigset_t *set, CPUMIPSState *env) 3140 { 3141 struct target_rt_sigframe *frame; 3142 abi_ulong frame_addr; 3143 int i; 3144 3145 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3146 trace_user_setup_rt_frame(env, frame_addr); 3147 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3148 goto give_sigsegv; 3149 } 3150 3151 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3152 3153 tswap_siginfo(&frame->rs_info, info); 3154 3155 __put_user(0, &frame->rs_uc.tuc_flags); 3156 __put_user(0, &frame->rs_uc.tuc_link); 3157 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3158 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3159 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3160 &frame->rs_uc.tuc_stack.ss_flags); 3161 3162 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3163 3164 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3165 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3166 } 3167 3168 /* 3169 * Arguments to signal handler: 3170 * 3171 * a0 = signal number 3172 * a1 = pointer to siginfo_t 3173 * a2 = pointer to struct ucontext 3174 * 3175 * $25 and PC point to the signal handler, $29 points to the 3176 * struct sigframe.
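 * $31 (ra) is pointed at the rt_sigreturn trampoline written into rs_code.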
3177 */ 3178 env->active_tc.gpr[ 4] = sig; 3179 env->active_tc.gpr[ 5] = frame_addr 3180 + offsetof(struct target_rt_sigframe, rs_info); 3181 env->active_tc.gpr[ 6] = frame_addr 3182 + offsetof(struct target_rt_sigframe, rs_uc); 3183 env->active_tc.gpr[29] = frame_addr; 3184 env->active_tc.gpr[31] = frame_addr 3185 + offsetof(struct target_rt_sigframe, rs_code); 3186 /* The original kernel code sets CP0_EPC to the handler 3187 * since it returns to userland using eret 3188 * we cannot do this here, and we must set PC directly */ 3189 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3190 mips_set_hflags_isa_mode_from_pc(env); 3191 unlock_user_struct(frame, frame_addr, 1); 3192 return; 3193 3194 give_sigsegv: 3195 unlock_user_struct(frame, frame_addr, 1); 3196 force_sigsegv(sig); 3197 } 3198 3199 long do_rt_sigreturn(CPUMIPSState *env) 3200 { 3201 struct target_rt_sigframe *frame; 3202 abi_ulong frame_addr; 3203 sigset_t blocked; 3204 3205 frame_addr = env->active_tc.gpr[29]; 3206 trace_user_do_rt_sigreturn(env, frame_addr); 3207 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3208 goto badframe; 3209 } 3210 3211 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3212 set_sigmask(&blocked); 3213 3214 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3215 3216 if (do_sigaltstack(frame_addr + 3217 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3218 0, get_sp_from_cpustate(env)) == -EFAULT) 3219 goto badframe; 3220 3221 env->active_tc.PC = env->CP0_EPC; 3222 mips_set_hflags_isa_mode_from_pc(env); 3223 /* I am not sure this is right, but it seems to work 3224 * maybe a problem with nested signals ? */ 3225 env->CP0_EPC = 0; 3226 return -TARGET_QEMU_ESIGRETURN; 3227 3228 badframe: 3229 force_sig(TARGET_SIGSEGV); 3230 return -TARGET_QEMU_ESIGRETURN; 3231 } 3232 3233 #elif defined(TARGET_SH4) 3234 3235 /* 3236 * code and data structures from linux kernel: 3237 * include/asm-sh/sigcontext.h 3238 * arch/sh/kernel/signal.c 3239 */ 3240 3241 struct target_sigcontext { 3242 target_ulong oldmask; 3243 3244 /* CPU registers */ 3245 target_ulong sc_gregs[16]; 3246 target_ulong sc_pc; 3247 target_ulong sc_pr; 3248 target_ulong sc_sr; 3249 target_ulong sc_gbr; 3250 target_ulong sc_mach; 3251 target_ulong sc_macl; 3252 3253 /* FPU registers */ 3254 target_ulong sc_fpregs[16]; 3255 target_ulong sc_xfpregs[16]; 3256 unsigned int sc_fpscr; 3257 unsigned int sc_fpul; 3258 unsigned int sc_ownedfp; 3259 }; 3260 3261 struct target_sigframe 3262 { 3263 struct target_sigcontext sc; 3264 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3265 uint16_t retcode[3]; 3266 }; 3267 3268 3269 struct target_ucontext { 3270 target_ulong tuc_flags; 3271 struct target_ucontext *tuc_link; 3272 target_stack_t tuc_stack; 3273 struct target_sigcontext tuc_mcontext; 3274 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3275 }; 3276 3277 struct target_rt_sigframe 3278 { 3279 struct target_siginfo info; 3280 struct target_ucontext uc; 3281 uint16_t retcode[3]; 3282 }; 3283 3284 3285 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3286 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3287 3288 static abi_ulong get_sigframe(struct target_sigaction *ka, 3289 unsigned long sp, size_t frame_size) 3290 { 3291 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3292 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3293 } 3294 3295 return (sp - frame_size) & -8ul; 3296 } 3297 3298 static void setup_sigcontext(struct 
target_sigcontext *sc, 3299 CPUSH4State *regs, unsigned long mask) 3300 { 3301 int i; 3302 3303 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3304 COPY(gregs[0]); COPY(gregs[1]); 3305 COPY(gregs[2]); COPY(gregs[3]); 3306 COPY(gregs[4]); COPY(gregs[5]); 3307 COPY(gregs[6]); COPY(gregs[7]); 3308 COPY(gregs[8]); COPY(gregs[9]); 3309 COPY(gregs[10]); COPY(gregs[11]); 3310 COPY(gregs[12]); COPY(gregs[13]); 3311 COPY(gregs[14]); COPY(gregs[15]); 3312 COPY(gbr); COPY(mach); 3313 COPY(macl); COPY(pr); 3314 COPY(sr); COPY(pc); 3315 #undef COPY 3316 3317 for (i=0; i<16; i++) { 3318 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3319 } 3320 __put_user(regs->fpscr, &sc->sc_fpscr); 3321 __put_user(regs->fpul, &sc->sc_fpul); 3322 3323 /* non-iBCS2 extensions.. */ 3324 __put_user(mask, &sc->oldmask); 3325 } 3326 3327 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3328 { 3329 int i; 3330 3331 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3332 COPY(gregs[0]); COPY(gregs[1]); 3333 COPY(gregs[2]); COPY(gregs[3]); 3334 COPY(gregs[4]); COPY(gregs[5]); 3335 COPY(gregs[6]); COPY(gregs[7]); 3336 COPY(gregs[8]); COPY(gregs[9]); 3337 COPY(gregs[10]); COPY(gregs[11]); 3338 COPY(gregs[12]); COPY(gregs[13]); 3339 COPY(gregs[14]); COPY(gregs[15]); 3340 COPY(gbr); COPY(mach); 3341 COPY(macl); COPY(pr); 3342 COPY(sr); COPY(pc); 3343 #undef COPY 3344 3345 for (i=0; i<16; i++) { 3346 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3347 } 3348 __get_user(regs->fpscr, &sc->sc_fpscr); 3349 __get_user(regs->fpul, &sc->sc_fpul); 3350 3351 regs->tra = -1; /* disable syscall checks */ 3352 } 3353 3354 static void setup_frame(int sig, struct target_sigaction *ka, 3355 target_sigset_t *set, CPUSH4State *regs) 3356 { 3357 struct target_sigframe *frame; 3358 abi_ulong frame_addr; 3359 int i; 3360 3361 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3362 trace_user_setup_frame(regs, frame_addr); 3363 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3364 goto give_sigsegv; 3365 } 3366 3367 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3368 3369 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3370 __put_user(set->sig[i + 1], &frame->extramask[i]); 3371 } 3372 3373 /* Set up to return from userspace. If provided, use a stub 3374 already in userspace. 
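Otherwise a trampoline is generated in frame->retcode below: a mov.w loads the sigreturn number (stored in retcode[2]) into r3 and trapa #0x10 enters the kernel.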
*/ 3375 if (ka->sa_flags & TARGET_SA_RESTORER) { 3376 regs->pr = (unsigned long) ka->sa_restorer; 3377 } else { 3378 /* Generate return code (system call to sigreturn) */ 3379 abi_ulong retcode_addr = frame_addr + 3380 offsetof(struct target_sigframe, retcode); 3381 __put_user(MOVW(2), &frame->retcode[0]); 3382 __put_user(TRAP_NOARG, &frame->retcode[1]); 3383 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3384 regs->pr = (unsigned long) retcode_addr; 3385 } 3386 3387 /* Set up registers for signal handler */ 3388 regs->gregs[15] = frame_addr; 3389 regs->gregs[4] = sig; /* Arg for signal handler */ 3390 regs->gregs[5] = 0; 3391 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3392 regs->pc = (unsigned long) ka->_sa_handler; 3393 3394 unlock_user_struct(frame, frame_addr, 1); 3395 return; 3396 3397 give_sigsegv: 3398 unlock_user_struct(frame, frame_addr, 1); 3399 force_sigsegv(sig); 3400 } 3401 3402 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3403 target_siginfo_t *info, 3404 target_sigset_t *set, CPUSH4State *regs) 3405 { 3406 struct target_rt_sigframe *frame; 3407 abi_ulong frame_addr; 3408 int i; 3409 3410 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3411 trace_user_setup_rt_frame(regs, frame_addr); 3412 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3413 goto give_sigsegv; 3414 } 3415 3416 tswap_siginfo(&frame->info, info); 3417 3418 /* Create the ucontext. */ 3419 __put_user(0, &frame->uc.tuc_flags); 3420 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3421 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3422 &frame->uc.tuc_stack.ss_sp); 3423 __put_user(sas_ss_flags(regs->gregs[15]), 3424 &frame->uc.tuc_stack.ss_flags); 3425 __put_user(target_sigaltstack_used.ss_size, 3426 &frame->uc.tuc_stack.ss_size); 3427 setup_sigcontext(&frame->uc.tuc_mcontext, 3428 regs, set->sig[0]); 3429 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3430 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3431 } 3432 3433 /* Set up to return from userspace. If provided, use a stub 3434 already in userspace. 
*/ 3435 if (ka->sa_flags & TARGET_SA_RESTORER) { 3436 regs->pr = (unsigned long) ka->sa_restorer; 3437 } else { 3438 /* Generate return code (system call to sigreturn) */ 3439 abi_ulong retcode_addr = frame_addr + 3440 offsetof(struct target_rt_sigframe, retcode); 3441 __put_user(MOVW(2), &frame->retcode[0]); 3442 __put_user(TRAP_NOARG, &frame->retcode[1]); 3443 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3444 regs->pr = (unsigned long) retcode_addr; 3445 } 3446 3447 /* Set up registers for signal handler */ 3448 regs->gregs[15] = frame_addr; 3449 regs->gregs[4] = sig; /* Arg for signal handler */ 3450 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3451 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3452 regs->pc = (unsigned long) ka->_sa_handler; 3453 3454 unlock_user_struct(frame, frame_addr, 1); 3455 return; 3456 3457 give_sigsegv: 3458 unlock_user_struct(frame, frame_addr, 1); 3459 force_sigsegv(sig); 3460 } 3461 3462 long do_sigreturn(CPUSH4State *regs) 3463 { 3464 struct target_sigframe *frame; 3465 abi_ulong frame_addr; 3466 sigset_t blocked; 3467 target_sigset_t target_set; 3468 int i; 3469 int err = 0; 3470 3471 frame_addr = regs->gregs[15]; 3472 trace_user_do_sigreturn(regs, frame_addr); 3473 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3474 goto badframe; 3475 } 3476 3477 __get_user(target_set.sig[0], &frame->sc.oldmask); 3478 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3479 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3480 } 3481 3482 if (err) 3483 goto badframe; 3484 3485 target_to_host_sigset_internal(&blocked, &target_set); 3486 set_sigmask(&blocked); 3487 3488 restore_sigcontext(regs, &frame->sc); 3489 3490 unlock_user_struct(frame, frame_addr, 0); 3491 return -TARGET_QEMU_ESIGRETURN; 3492 3493 badframe: 3494 unlock_user_struct(frame, frame_addr, 0); 3495 force_sig(TARGET_SIGSEGV); 3496 return -TARGET_QEMU_ESIGRETURN; 3497 } 3498 3499 long do_rt_sigreturn(CPUSH4State *regs) 3500 { 3501 struct target_rt_sigframe *frame; 3502 abi_ulong frame_addr; 3503 sigset_t blocked; 3504 3505 frame_addr = regs->gregs[15]; 3506 trace_user_do_rt_sigreturn(regs, frame_addr); 3507 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3508 goto badframe; 3509 } 3510 3511 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 3512 set_sigmask(&blocked); 3513 3514 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 3515 3516 if (do_sigaltstack(frame_addr + 3517 offsetof(struct target_rt_sigframe, uc.tuc_stack), 3518 0, get_sp_from_cpustate(regs)) == -EFAULT) { 3519 goto badframe; 3520 } 3521 3522 unlock_user_struct(frame, frame_addr, 0); 3523 return -TARGET_QEMU_ESIGRETURN; 3524 3525 badframe: 3526 unlock_user_struct(frame, frame_addr, 0); 3527 force_sig(TARGET_SIGSEGV); 3528 return -TARGET_QEMU_ESIGRETURN; 3529 } 3530 #elif defined(TARGET_MICROBLAZE) 3531 3532 struct target_sigcontext { 3533 struct target_pt_regs regs; /* needs to be first */ 3534 uint32_t oldmask; 3535 }; 3536 3537 struct target_stack_t { 3538 abi_ulong ss_sp; 3539 int ss_flags; 3540 unsigned int ss_size; 3541 }; 3542 3543 struct target_ucontext { 3544 abi_ulong tuc_flags; 3545 abi_ulong tuc_link; 3546 struct target_stack_t tuc_stack; 3547 struct target_sigcontext tuc_mcontext; 3548 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 3549 }; 3550 3551 /* Signal frames. 
*/ 3552 struct target_signal_frame { 3553 struct target_ucontext uc; 3554 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3555 uint32_t tramp[2]; 3556 }; 3557 3558 struct rt_signal_frame { 3559 siginfo_t info; 3560 struct ucontext uc; 3561 uint32_t tramp[2]; 3562 }; 3563 3564 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3565 { 3566 __put_user(env->regs[0], &sc->regs.r0); 3567 __put_user(env->regs[1], &sc->regs.r1); 3568 __put_user(env->regs[2], &sc->regs.r2); 3569 __put_user(env->regs[3], &sc->regs.r3); 3570 __put_user(env->regs[4], &sc->regs.r4); 3571 __put_user(env->regs[5], &sc->regs.r5); 3572 __put_user(env->regs[6], &sc->regs.r6); 3573 __put_user(env->regs[7], &sc->regs.r7); 3574 __put_user(env->regs[8], &sc->regs.r8); 3575 __put_user(env->regs[9], &sc->regs.r9); 3576 __put_user(env->regs[10], &sc->regs.r10); 3577 __put_user(env->regs[11], &sc->regs.r11); 3578 __put_user(env->regs[12], &sc->regs.r12); 3579 __put_user(env->regs[13], &sc->regs.r13); 3580 __put_user(env->regs[14], &sc->regs.r14); 3581 __put_user(env->regs[15], &sc->regs.r15); 3582 __put_user(env->regs[16], &sc->regs.r16); 3583 __put_user(env->regs[17], &sc->regs.r17); 3584 __put_user(env->regs[18], &sc->regs.r18); 3585 __put_user(env->regs[19], &sc->regs.r19); 3586 __put_user(env->regs[20], &sc->regs.r20); 3587 __put_user(env->regs[21], &sc->regs.r21); 3588 __put_user(env->regs[22], &sc->regs.r22); 3589 __put_user(env->regs[23], &sc->regs.r23); 3590 __put_user(env->regs[24], &sc->regs.r24); 3591 __put_user(env->regs[25], &sc->regs.r25); 3592 __put_user(env->regs[26], &sc->regs.r26); 3593 __put_user(env->regs[27], &sc->regs.r27); 3594 __put_user(env->regs[28], &sc->regs.r28); 3595 __put_user(env->regs[29], &sc->regs.r29); 3596 __put_user(env->regs[30], &sc->regs.r30); 3597 __put_user(env->regs[31], &sc->regs.r31); 3598 __put_user(env->sregs[SR_PC], &sc->regs.pc); 3599 } 3600 3601 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 3602 { 3603 __get_user(env->regs[0], &sc->regs.r0); 3604 __get_user(env->regs[1], &sc->regs.r1); 3605 __get_user(env->regs[2], &sc->regs.r2); 3606 __get_user(env->regs[3], &sc->regs.r3); 3607 __get_user(env->regs[4], &sc->regs.r4); 3608 __get_user(env->regs[5], &sc->regs.r5); 3609 __get_user(env->regs[6], &sc->regs.r6); 3610 __get_user(env->regs[7], &sc->regs.r7); 3611 __get_user(env->regs[8], &sc->regs.r8); 3612 __get_user(env->regs[9], &sc->regs.r9); 3613 __get_user(env->regs[10], &sc->regs.r10); 3614 __get_user(env->regs[11], &sc->regs.r11); 3615 __get_user(env->regs[12], &sc->regs.r12); 3616 __get_user(env->regs[13], &sc->regs.r13); 3617 __get_user(env->regs[14], &sc->regs.r14); 3618 __get_user(env->regs[15], &sc->regs.r15); 3619 __get_user(env->regs[16], &sc->regs.r16); 3620 __get_user(env->regs[17], &sc->regs.r17); 3621 __get_user(env->regs[18], &sc->regs.r18); 3622 __get_user(env->regs[19], &sc->regs.r19); 3623 __get_user(env->regs[20], &sc->regs.r20); 3624 __get_user(env->regs[21], &sc->regs.r21); 3625 __get_user(env->regs[22], &sc->regs.r22); 3626 __get_user(env->regs[23], &sc->regs.r23); 3627 __get_user(env->regs[24], &sc->regs.r24); 3628 __get_user(env->regs[25], &sc->regs.r25); 3629 __get_user(env->regs[26], &sc->regs.r26); 3630 __get_user(env->regs[27], &sc->regs.r27); 3631 __get_user(env->regs[28], &sc->regs.r28); 3632 __get_user(env->regs[29], &sc->regs.r29); 3633 __get_user(env->regs[30], &sc->regs.r30); 3634 __get_user(env->regs[31], &sc->regs.r31); 3635 __get_user(env->sregs[SR_PC], &sc->regs.pc); 3636 } 3637 3638 static 
abi_ulong get_sigframe(struct target_sigaction *ka, 3639 CPUMBState *env, int frame_size) 3640 { 3641 abi_ulong sp = env->regs[1]; 3642 3643 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 3644 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3645 } 3646 3647 return ((sp - frame_size) & -8UL); 3648 } 3649 3650 static void setup_frame(int sig, struct target_sigaction *ka, 3651 target_sigset_t *set, CPUMBState *env) 3652 { 3653 struct target_signal_frame *frame; 3654 abi_ulong frame_addr; 3655 int i; 3656 3657 frame_addr = get_sigframe(ka, env, sizeof *frame); 3658 trace_user_setup_frame(env, frame_addr); 3659 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3660 goto badframe; 3661 3662 /* Save the mask. */ 3663 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 3664 3665 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3666 __put_user(set->sig[i], &frame->extramask[i - 1]); 3667 } 3668 3669 setup_sigcontext(&frame->uc.tuc_mcontext, env); 3670 3671 /* Set up to return from userspace. If provided, use a stub 3672 already in userspace. */ 3673 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 3674 if (ka->sa_flags & TARGET_SA_RESTORER) { 3675 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 3676 } else { 3677 uint32_t t; 3678 /* Note, these encodings are _big endian_! */ 3679 /* addi r12, r0, __NR_sigreturn */ 3680 t = 0x31800000UL | TARGET_NR_sigreturn; 3681 __put_user(t, frame->tramp + 0); 3682 /* brki r14, 0x8 */ 3683 t = 0xb9cc0008UL; 3684 __put_user(t, frame->tramp + 1); 3685 3686 /* Return from sighandler will jump to the tramp. 3687 Negative 8 offset because return is rtsd r15, 8 */ 3688 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 3689 - 8; 3690 } 3691 3692 /* Set up registers for signal handler */ 3693 env->regs[1] = frame_addr; 3694 /* Signal handler args: */ 3695 env->regs[5] = sig; /* Arg 0: signum */ 3696 env->regs[6] = 0; 3697 /* arg 1: sigcontext */ 3698 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 3699 3700 /* Offset of 4 to handle microblaze rtid r14, 0 */ 3701 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 3702 3703 unlock_user_struct(frame, frame_addr, 1); 3704 return; 3705 badframe: 3706 force_sigsegv(sig); 3707 } 3708 3709 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3710 target_siginfo_t *info, 3711 target_sigset_t *set, CPUMBState *env) 3712 { 3713 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 3714 } 3715 3716 long do_sigreturn(CPUMBState *env) 3717 { 3718 struct target_signal_frame *frame; 3719 abi_ulong frame_addr; 3720 target_sigset_t target_set; 3721 sigset_t set; 3722 int i; 3723 3724 frame_addr = env->regs[R_SP]; 3725 trace_user_do_sigreturn(env, frame_addr); 3726 /* Make sure the guest isn't playing games. */ 3727 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 3728 goto badframe; 3729 3730 /* Restore blocked signals */ 3731 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 3732 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3733 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3734 } 3735 target_to_host_sigset_internal(&set, &target_set); 3736 set_sigmask(&set); 3737 3738 restore_sigcontext(&frame->uc.tuc_mcontext, env); 3739 /* We got here through a sigreturn syscall, our path back is via an 3740 rtb insn so setup r14 for that. 
*/ 3741 env->regs[14] = env->sregs[SR_PC]; 3742 3743 unlock_user_struct(frame, frame_addr, 0); 3744 return -TARGET_QEMU_ESIGRETURN; 3745 badframe: 3746 force_sig(TARGET_SIGSEGV); 3747 return -TARGET_QEMU_ESIGRETURN; 3748 } 3749 3750 long do_rt_sigreturn(CPUMBState *env) 3751 { 3752 trace_user_do_rt_sigreturn(env, 0); 3753 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 3754 return -TARGET_ENOSYS; 3755 } 3756 3757 #elif defined(TARGET_CRIS) 3758 3759 struct target_sigcontext { 3760 struct target_pt_regs regs; /* needs to be first */ 3761 uint32_t oldmask; 3762 uint32_t usp; /* usp before stacking this gunk on it */ 3763 }; 3764 3765 /* Signal frames. */ 3766 struct target_signal_frame { 3767 struct target_sigcontext sc; 3768 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 3769 uint16_t retcode[4]; /* Trampoline code. */ 3770 }; 3771 3772 struct rt_signal_frame { 3773 siginfo_t *pinfo; 3774 void *puc; 3775 siginfo_t info; 3776 struct ucontext uc; 3777 uint16_t retcode[4]; /* Trampoline code. */ 3778 }; 3779 3780 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3781 { 3782 __put_user(env->regs[0], &sc->regs.r0); 3783 __put_user(env->regs[1], &sc->regs.r1); 3784 __put_user(env->regs[2], &sc->regs.r2); 3785 __put_user(env->regs[3], &sc->regs.r3); 3786 __put_user(env->regs[4], &sc->regs.r4); 3787 __put_user(env->regs[5], &sc->regs.r5); 3788 __put_user(env->regs[6], &sc->regs.r6); 3789 __put_user(env->regs[7], &sc->regs.r7); 3790 __put_user(env->regs[8], &sc->regs.r8); 3791 __put_user(env->regs[9], &sc->regs.r9); 3792 __put_user(env->regs[10], &sc->regs.r10); 3793 __put_user(env->regs[11], &sc->regs.r11); 3794 __put_user(env->regs[12], &sc->regs.r12); 3795 __put_user(env->regs[13], &sc->regs.r13); 3796 __put_user(env->regs[14], &sc->usp); 3797 __put_user(env->regs[15], &sc->regs.acr); 3798 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 3799 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 3800 __put_user(env->pc, &sc->regs.erp); 3801 } 3802 3803 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 3804 { 3805 __get_user(env->regs[0], &sc->regs.r0); 3806 __get_user(env->regs[1], &sc->regs.r1); 3807 __get_user(env->regs[2], &sc->regs.r2); 3808 __get_user(env->regs[3], &sc->regs.r3); 3809 __get_user(env->regs[4], &sc->regs.r4); 3810 __get_user(env->regs[5], &sc->regs.r5); 3811 __get_user(env->regs[6], &sc->regs.r6); 3812 __get_user(env->regs[7], &sc->regs.r7); 3813 __get_user(env->regs[8], &sc->regs.r8); 3814 __get_user(env->regs[9], &sc->regs.r9); 3815 __get_user(env->regs[10], &sc->regs.r10); 3816 __get_user(env->regs[11], &sc->regs.r11); 3817 __get_user(env->regs[12], &sc->regs.r12); 3818 __get_user(env->regs[13], &sc->regs.r13); 3819 __get_user(env->regs[14], &sc->usp); 3820 __get_user(env->regs[15], &sc->regs.acr); 3821 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 3822 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 3823 __get_user(env->pc, &sc->regs.erp); 3824 } 3825 3826 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 3827 { 3828 abi_ulong sp; 3829 /* Align the stack downwards to 4. 
*/ 3830 sp = (env->regs[R_SP] & ~3); 3831 return sp - framesize; 3832 } 3833 3834 static void setup_frame(int sig, struct target_sigaction *ka, 3835 target_sigset_t *set, CPUCRISState *env) 3836 { 3837 struct target_signal_frame *frame; 3838 abi_ulong frame_addr; 3839 int i; 3840 3841 frame_addr = get_sigframe(env, sizeof *frame); 3842 trace_user_setup_frame(env, frame_addr); 3843 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 3844 goto badframe; 3845 3846 /* 3847 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 3848 * use this trampoline anymore but it sets it up for GDB. 3849 * In QEMU, using the trampoline simplifies things a bit so we use it. 3850 * 3851 * This is movu.w __NR_sigreturn, r9; break 13; 3852 */ 3853 __put_user(0x9c5f, frame->retcode+0); 3854 __put_user(TARGET_NR_sigreturn, 3855 frame->retcode + 1); 3856 __put_user(0xe93d, frame->retcode + 2); 3857 3858 /* Save the mask. */ 3859 __put_user(set->sig[0], &frame->sc.oldmask); 3860 3861 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3862 __put_user(set->sig[i], &frame->extramask[i - 1]); 3863 } 3864 3865 setup_sigcontext(&frame->sc, env); 3866 3867 /* Move the stack and setup the arguments for the handler. */ 3868 env->regs[R_SP] = frame_addr; 3869 env->regs[10] = sig; 3870 env->pc = (unsigned long) ka->_sa_handler; 3871 /* Link SRP so the guest returns through the trampoline. */ 3872 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 3873 3874 unlock_user_struct(frame, frame_addr, 1); 3875 return; 3876 badframe: 3877 force_sigsegv(sig); 3878 } 3879 3880 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3881 target_siginfo_t *info, 3882 target_sigset_t *set, CPUCRISState *env) 3883 { 3884 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 3885 } 3886 3887 long do_sigreturn(CPUCRISState *env) 3888 { 3889 struct target_signal_frame *frame; 3890 abi_ulong frame_addr; 3891 target_sigset_t target_set; 3892 sigset_t set; 3893 int i; 3894 3895 frame_addr = env->regs[R_SP]; 3896 trace_user_do_sigreturn(env, frame_addr); 3897 /* Make sure the guest isn't playing games. 
*/ 3898 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 3899 goto badframe; 3900 } 3901 3902 /* Restore blocked signals */ 3903 __get_user(target_set.sig[0], &frame->sc.oldmask); 3904 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3905 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3906 } 3907 target_to_host_sigset_internal(&set, &target_set); 3908 set_sigmask(&set); 3909 3910 restore_sigcontext(&frame->sc, env); 3911 unlock_user_struct(frame, frame_addr, 0); 3912 return -TARGET_QEMU_ESIGRETURN; 3913 badframe: 3914 force_sig(TARGET_SIGSEGV); 3915 return -TARGET_QEMU_ESIGRETURN; 3916 } 3917 3918 long do_rt_sigreturn(CPUCRISState *env) 3919 { 3920 trace_user_do_rt_sigreturn(env, 0); 3921 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 3922 return -TARGET_ENOSYS; 3923 } 3924 3925 #elif defined(TARGET_NIOS2) 3926 3927 #define MCONTEXT_VERSION 2 3928 3929 struct target_sigcontext { 3930 int version; 3931 unsigned long gregs[32]; 3932 }; 3933 3934 struct target_ucontext { 3935 abi_ulong tuc_flags; 3936 abi_ulong tuc_link; 3937 target_stack_t tuc_stack; 3938 struct target_sigcontext tuc_mcontext; 3939 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3940 }; 3941 3942 struct target_rt_sigframe { 3943 struct target_siginfo info; 3944 struct target_ucontext uc; 3945 }; 3946 3947 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka) 3948 { 3949 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) { 3950 #ifdef CONFIG_STACK_GROWSUP 3951 return target_sigaltstack_used.ss_sp; 3952 #else 3953 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3954 #endif 3955 } 3956 return sp; 3957 } 3958 3959 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) 3960 { 3961 unsigned long *gregs = uc->tuc_mcontext.gregs; 3962 3963 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version); 3964 __put_user(env->regs[1], &gregs[0]); 3965 __put_user(env->regs[2], &gregs[1]); 3966 __put_user(env->regs[3], &gregs[2]); 3967 __put_user(env->regs[4], &gregs[3]); 3968 __put_user(env->regs[5], &gregs[4]); 3969 __put_user(env->regs[6], &gregs[5]); 3970 __put_user(env->regs[7], &gregs[6]); 3971 __put_user(env->regs[8], &gregs[7]); 3972 __put_user(env->regs[9], &gregs[8]); 3973 __put_user(env->regs[10], &gregs[9]); 3974 __put_user(env->regs[11], &gregs[10]); 3975 __put_user(env->regs[12], &gregs[11]); 3976 __put_user(env->regs[13], &gregs[12]); 3977 __put_user(env->regs[14], &gregs[13]); 3978 __put_user(env->regs[15], &gregs[14]); 3979 __put_user(env->regs[16], &gregs[15]); 3980 __put_user(env->regs[17], &gregs[16]); 3981 __put_user(env->regs[18], &gregs[17]); 3982 __put_user(env->regs[19], &gregs[18]); 3983 __put_user(env->regs[20], &gregs[19]); 3984 __put_user(env->regs[21], &gregs[20]); 3985 __put_user(env->regs[22], &gregs[21]); 3986 __put_user(env->regs[23], &gregs[22]); 3987 __put_user(env->regs[R_RA], &gregs[23]); 3988 __put_user(env->regs[R_FP], &gregs[24]); 3989 __put_user(env->regs[R_GP], &gregs[25]); 3990 __put_user(env->regs[R_EA], &gregs[27]); 3991 __put_user(env->regs[R_SP], &gregs[28]); 3992 3993 return 0; 3994 } 3995 3996 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc, 3997 int *pr2) 3998 { 3999 int temp; 4000 abi_ulong off, frame_addr = env->regs[R_SP]; 4001 unsigned long *gregs = uc->tuc_mcontext.gregs; 4002 int err; 4003 4004 /* Always make any pending restarted system calls return -EINTR */ 4005 /* current->restart_block.fn = do_no_restart_syscall; */ 4006 4007 
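    /* Refuse to restore a frame whose mcontext version does not match what rt_setup_ucontext() wrote; the caller then treats it as a bad frame. */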
__get_user(temp, &uc->tuc_mcontext.version); 4008 if (temp != MCONTEXT_VERSION) { 4009 return 1; 4010 } 4011 4012 /* restore passed registers */ 4013 __get_user(env->regs[1], &gregs[0]); 4014 __get_user(env->regs[2], &gregs[1]); 4015 __get_user(env->regs[3], &gregs[2]); 4016 __get_user(env->regs[4], &gregs[3]); 4017 __get_user(env->regs[5], &gregs[4]); 4018 __get_user(env->regs[6], &gregs[5]); 4019 __get_user(env->regs[7], &gregs[6]); 4020 __get_user(env->regs[8], &gregs[7]); 4021 __get_user(env->regs[9], &gregs[8]); 4022 __get_user(env->regs[10], &gregs[9]); 4023 __get_user(env->regs[11], &gregs[10]); 4024 __get_user(env->regs[12], &gregs[11]); 4025 __get_user(env->regs[13], &gregs[12]); 4026 __get_user(env->regs[14], &gregs[13]); 4027 __get_user(env->regs[15], &gregs[14]); 4028 __get_user(env->regs[16], &gregs[15]); 4029 __get_user(env->regs[17], &gregs[16]); 4030 __get_user(env->regs[18], &gregs[17]); 4031 __get_user(env->regs[19], &gregs[18]); 4032 __get_user(env->regs[20], &gregs[19]); 4033 __get_user(env->regs[21], &gregs[20]); 4034 __get_user(env->regs[22], &gregs[21]); 4035 __get_user(env->regs[23], &gregs[22]); 4036 /* gregs[23] is handled below */ 4037 /* Verify, should this be settable */ 4038 __get_user(env->regs[R_FP], &gregs[24]); 4039 /* Verify, should this be settable */ 4040 __get_user(env->regs[R_GP], &gregs[25]); 4041 /* Not really necessary no user settable bits */ 4042 __get_user(temp, &gregs[26]); 4043 __get_user(env->regs[R_EA], &gregs[27]); 4044 4045 __get_user(env->regs[R_RA], &gregs[23]); 4046 __get_user(env->regs[R_SP], &gregs[28]); 4047 4048 off = offsetof(struct target_rt_sigframe, uc.tuc_stack); 4049 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env)); 4050 if (err == -EFAULT) { 4051 return 1; 4052 } 4053 4054 *pr2 = env->regs[2]; 4055 return 0; 4056 } 4057 4058 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env, 4059 size_t frame_size) 4060 { 4061 unsigned long usp; 4062 4063 /* Default to using normal stack. */ 4064 usp = env->regs[R_SP]; 4065 4066 /* This is the X/Open sanctioned signal stack switching. */ 4067 usp = sigsp(usp, ka); 4068 4069 /* Verify, is it 32 or 64 bit aligned */ 4070 return (void *)((usp - frame_size) & -8UL); 4071 } 4072 4073 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4074 target_siginfo_t *info, 4075 target_sigset_t *set, 4076 CPUNios2State *env) 4077 { 4078 struct target_rt_sigframe *frame; 4079 int i, err = 0; 4080 4081 frame = get_sigframe(ka, env, sizeof(*frame)); 4082 4083 if (ka->sa_flags & SA_SIGINFO) { 4084 tswap_siginfo(&frame->info, info); 4085 } 4086 4087 /* Create the ucontext. */ 4088 __put_user(0, &frame->uc.tuc_flags); 4089 __put_user(0, &frame->uc.tuc_link); 4090 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4091 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags); 4092 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4093 err |= rt_setup_ucontext(&frame->uc, env); 4094 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4095 __put_user((abi_ulong)set->sig[i], 4096 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4097 } 4098 4099 if (err) { 4100 goto give_sigsegv; 4101 } 4102 4103 /* Set up to return from userspace; jump to fixed address sigreturn 4104 trampoline on kuser page. 
*/ 4105 env->regs[R_RA] = (unsigned long) (0x1044); 4106 4107 /* Set up registers for signal handler */ 4108 env->regs[R_SP] = (unsigned long) frame; 4109 env->regs[4] = (unsigned long) sig; 4110 env->regs[5] = (unsigned long) &frame->info; 4111 env->regs[6] = (unsigned long) &frame->uc; 4112 env->regs[R_EA] = (unsigned long) ka->_sa_handler; 4113 return; 4114 4115 give_sigsegv: 4116 if (sig == TARGET_SIGSEGV) { 4117 ka->_sa_handler = TARGET_SIG_DFL; 4118 } 4119 force_sigsegv(sig); 4120 return; 4121 } 4122 4123 long do_sigreturn(CPUNios2State *env) 4124 { 4125 trace_user_do_sigreturn(env, 0); 4126 fprintf(stderr, "do_sigreturn: not implemented\n"); 4127 return -TARGET_ENOSYS; 4128 } 4129 4130 long do_rt_sigreturn(CPUNios2State *env) 4131 { 4132 /* Verify, can we follow the stack back */ 4133 abi_ulong frame_addr = env->regs[R_SP]; 4134 struct target_rt_sigframe *frame; 4135 sigset_t set; 4136 int rval; 4137 4138 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4139 goto badframe; 4140 } 4141 4142 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4143 do_sigprocmask(SIG_SETMASK, &set, NULL); 4144 4145 if (rt_restore_ucontext(env, &frame->uc, &rval)) { 4146 goto badframe; 4147 } 4148 4149 unlock_user_struct(frame, frame_addr, 0); 4150 return rval; 4151 4152 badframe: 4153 unlock_user_struct(frame, frame_addr, 0); 4154 force_sig(TARGET_SIGSEGV); 4155 return 0; 4156 } 4157 /* TARGET_NIOS2 */ 4158 4159 #elif defined(TARGET_OPENRISC) 4160 4161 struct target_sigcontext { 4162 struct target_pt_regs regs; 4163 abi_ulong oldmask; 4164 abi_ulong usp; 4165 }; 4166 4167 struct target_ucontext { 4168 abi_ulong tuc_flags; 4169 abi_ulong tuc_link; 4170 target_stack_t tuc_stack; 4171 struct target_sigcontext tuc_mcontext; 4172 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4173 }; 4174 4175 struct target_rt_sigframe { 4176 abi_ulong pinfo; 4177 uint64_t puc; 4178 struct target_siginfo info; 4179 struct target_sigcontext sc; 4180 struct target_ucontext uc; 4181 unsigned char retcode[16]; /* trampoline code */ 4182 }; 4183 4184 /* This is the asm-generic/ucontext.h version */ 4185 #if 0 4186 static int restore_sigcontext(CPUOpenRISCState *regs, 4187 struct target_sigcontext *sc) 4188 { 4189 unsigned int err = 0; 4190 unsigned long old_usp; 4191 4192 /* Alwys make any pending restarted system call return -EINTR */ 4193 current_thread_info()->restart_block.fn = do_no_restart_syscall; 4194 4195 /* restore the regs from &sc->regs (same as sc, since regs is first) 4196 * (sc is already checked for VERIFY_READ since the sigframe was 4197 * checked in sys_sigreturn previously) 4198 */ 4199 4200 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 4201 goto badframe; 4202 } 4203 4204 /* make sure the U-flag is set so user-mode cannot fool us */ 4205 4206 regs->sr &= ~SR_SM; 4207 4208 /* restore the old USP as it was before we stacked the sc etc. 4209 * (we cannot just pop the sigcontext since we aligned the sp and 4210 * stuff after pushing it) 4211 */ 4212 4213 __get_user(old_usp, &sc->usp); 4214 phx_signal("old_usp 0x%lx", old_usp); 4215 4216 __PHX__ REALLY /* ??? */ 4217 wrusp(old_usp); 4218 regs->gpr[1] = old_usp; 4219 4220 /* TODO: the other ports use regs->orig_XX to disable syscall checks 4221 * after this completes, but we don't use that mechanism. maybe we can 4222 * use it now ? 4223 */ 4224 4225 return err; 4226 4227 badframe: 4228 return 1; 4229 } 4230 #endif 4231 4232 /* Set up a signal frame. 
*/ 4233 4234 static void setup_sigcontext(struct target_sigcontext *sc, 4235 CPUOpenRISCState *regs, 4236 unsigned long mask) 4237 { 4238 unsigned long usp = regs->gpr[1]; 4239 4240 /* copy the regs. they are first in sc so we can use sc directly */ 4241 4242 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 4243 4244 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 4245 the signal handler. The frametype will be restored to its previous 4246 value in restore_sigcontext. */ 4247 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 4248 4249 /* then some other stuff */ 4250 __put_user(mask, &sc->oldmask); 4251 __put_user(usp, &sc->usp); 4252 } 4253 4254 static inline unsigned long align_sigframe(unsigned long sp) 4255 { 4256 return sp & ~3UL; 4257 } 4258 4259 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 4260 CPUOpenRISCState *regs, 4261 size_t frame_size) 4262 { 4263 unsigned long sp = regs->gpr[1]; 4264 int onsigstack = on_sig_stack(sp); 4265 4266 /* redzone */ 4267 /* This is the X/Open sanctioned signal stack switching. */ 4268 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4269 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4270 } 4271 4272 sp = align_sigframe(sp - frame_size); 4273 4274 /* 4275 * If we are on the alternate signal stack and would overflow it, don't. 4276 * Return an always-bogus address instead so we will die with SIGSEGV. 4277 */ 4278 4279 if (onsigstack && !likely(on_sig_stack(sp))) { 4280 return -1L; 4281 } 4282 4283 return sp; 4284 } 4285 4286 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4287 target_siginfo_t *info, 4288 target_sigset_t *set, CPUOpenRISCState *env) 4289 { 4290 int err = 0; 4291 abi_ulong frame_addr; 4292 unsigned long return_ip; 4293 struct target_rt_sigframe *frame; 4294 abi_ulong info_addr, uc_addr; 4295 4296 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4297 trace_user_setup_rt_frame(env, frame_addr); 4298 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4299 goto give_sigsegv; 4300 } 4301 4302 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4303 __put_user(info_addr, &frame->pinfo); 4304 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4305 __put_user(uc_addr, &frame->puc); 4306 4307 if (ka->sa_flags & SA_SIGINFO) { 4308 tswap_siginfo(&frame->info, info); 4309 } 4310 4311 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ 4312 __put_user(0, &frame->uc.tuc_flags); 4313 __put_user(0, &frame->uc.tuc_link); 4314 __put_user(target_sigaltstack_used.ss_sp, 4315 &frame->uc.tuc_stack.ss_sp); 4316 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); 4317 __put_user(target_sigaltstack_used.ss_size, 4318 &frame->uc.tuc_stack.ss_size); 4319 setup_sigcontext(&frame->sc, env, set->sig[0]); 4320 4321 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4322 4323 /* trampoline - the desired return ip is the retcode itself */ 4324 return_ip = (unsigned long)&frame->retcode; 4325 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4326 __put_user(0xa960, (short *)(frame->retcode + 0)); 4327 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4328 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4329 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4330 4331 if (err) { 4332 goto give_sigsegv; 4333 } 4334 4335 /* TODO what is the current->exec_domain stuff and invmap ? 
*/ 4336 4337 /* Set up registers for signal handler */ 4338 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4339 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ 4340 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ 4341 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ 4342 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ 4343 4344 /* actually move the usp to reflect the stacked frame */ 4345 env->gpr[1] = (unsigned long)frame; 4346 4347 return; 4348 4349 give_sigsegv: 4350 unlock_user_struct(frame, frame_addr, 1); 4351 force_sigsegv(sig); 4352 } 4353 4354 long do_sigreturn(CPUOpenRISCState *env) 4355 { 4356 trace_user_do_sigreturn(env, 0); 4357 fprintf(stderr, "do_sigreturn: not implemented\n"); 4358 return -TARGET_ENOSYS; 4359 } 4360 4361 long do_rt_sigreturn(CPUOpenRISCState *env) 4362 { 4363 trace_user_do_rt_sigreturn(env, 0); 4364 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4365 return -TARGET_ENOSYS; 4366 } 4367 /* TARGET_OPENRISC */ 4368 4369 #elif defined(TARGET_S390X) 4370 4371 #define __NUM_GPRS 16 4372 #define __NUM_FPRS 16 4373 #define __NUM_ACRS 16 4374 4375 #define S390_SYSCALL_SIZE 2 4376 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4377 4378 #define _SIGCONTEXT_NSIG 64 4379 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4380 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4381 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4382 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4383 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4384 4385 typedef struct { 4386 target_psw_t psw; 4387 target_ulong gprs[__NUM_GPRS]; 4388 unsigned int acrs[__NUM_ACRS]; 4389 } target_s390_regs_common; 4390 4391 typedef struct { 4392 unsigned int fpc; 4393 double fprs[__NUM_FPRS]; 4394 } target_s390_fp_regs; 4395 4396 typedef struct { 4397 target_s390_regs_common regs; 4398 target_s390_fp_regs fpregs; 4399 } target_sigregs; 4400 4401 struct target_sigcontext { 4402 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4403 target_sigregs *sregs; 4404 }; 4405 4406 typedef struct { 4407 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4408 struct target_sigcontext sc; 4409 target_sigregs sregs; 4410 int signo; 4411 uint8_t retcode[S390_SYSCALL_SIZE]; 4412 } sigframe; 4413 4414 struct target_ucontext { 4415 target_ulong tuc_flags; 4416 struct target_ucontext *tuc_link; 4417 target_stack_t tuc_stack; 4418 target_sigregs tuc_mcontext; 4419 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4420 }; 4421 4422 typedef struct { 4423 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4424 uint8_t retcode[S390_SYSCALL_SIZE]; 4425 struct target_siginfo info; 4426 struct target_ucontext uc; 4427 } rt_sigframe; 4428 4429 static inline abi_ulong 4430 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4431 { 4432 abi_ulong sp; 4433 4434 /* Default to using normal stack */ 4435 sp = env->regs[15]; 4436 4437 /* This is the X/Open sanctioned signal stack switching. */ 4438 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4439 if (!sas_ss_flags(sp)) { 4440 sp = target_sigaltstack_used.ss_sp + 4441 target_sigaltstack_used.ss_size; 4442 } 4443 } 4444 4445 /* This is the legacy signal stack switching. 
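       Never taken under QEMU user-mode emulation: the !user_mode() test below is hard-coded to 0.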
*/ 4446 else if (/* FIXME !user_mode(regs) */ 0 && 4447 !(ka->sa_flags & TARGET_SA_RESTORER) && 4448 ka->sa_restorer) { 4449 sp = (abi_ulong) ka->sa_restorer; 4450 } 4451 4452 return (sp - frame_size) & -8ul; 4453 } 4454 4455 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4456 { 4457 int i; 4458 //save_access_regs(current->thread.acrs); FIXME 4459 4460 /* Copy a 'clean' PSW mask to the user to avoid leaking 4461 information about whether PER is currently on. */ 4462 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4463 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4464 for (i = 0; i < 16; i++) { 4465 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4466 } 4467 for (i = 0; i < 16; i++) { 4468 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4469 } 4470 /* 4471 * We have to store the fp registers to current->thread.fp_regs 4472 * to merge them with the emulated registers. 4473 */ 4474 //save_fp_regs(&current->thread.fp_regs); FIXME 4475 for (i = 0; i < 16; i++) { 4476 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4477 } 4478 } 4479 4480 static void setup_frame(int sig, struct target_sigaction *ka, 4481 target_sigset_t *set, CPUS390XState *env) 4482 { 4483 sigframe *frame; 4484 abi_ulong frame_addr; 4485 4486 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4487 trace_user_setup_frame(env, frame_addr); 4488 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4489 goto give_sigsegv; 4490 } 4491 4492 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4493 4494 save_sigregs(env, &frame->sregs); 4495 4496 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4497 (abi_ulong *)&frame->sc.sregs); 4498 4499 /* Set up to return from userspace. If provided, use a stub 4500 already in userspace. */ 4501 if (ka->sa_flags & TARGET_SA_RESTORER) { 4502 env->regs[14] = (unsigned long) 4503 ka->sa_restorer | PSW_ADDR_AMODE; 4504 } else { 4505 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 4506 | PSW_ADDR_AMODE; 4507 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 4508 (uint16_t *)(frame->retcode)); 4509 } 4510 4511 /* Set up backchain. */ 4512 __put_user(env->regs[15], (abi_ulong *) frame); 4513 4514 /* Set up registers for signal handler */ 4515 env->regs[15] = frame_addr; 4516 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4517 4518 env->regs[2] = sig; //map_signal(sig); 4519 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 4520 4521 /* We forgot to include these in the sigcontext. 4522 To avoid breaking binary compatibility, they are passed as args. */ 4523 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 4524 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 4525 4526 /* Place signal number on stack to allow backtrace from handler. */ 4527 __put_user(env->regs[2], &frame->signo); 4528 unlock_user_struct(frame, frame_addr, 1); 4529 return; 4530 4531 give_sigsegv: 4532 force_sigsegv(sig); 4533 } 4534 4535 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4536 target_siginfo_t *info, 4537 target_sigset_t *set, CPUS390XState *env) 4538 { 4539 int i; 4540 rt_sigframe *frame; 4541 abi_ulong frame_addr; 4542 4543 frame_addr = get_sigframe(ka, env, sizeof *frame); 4544 trace_user_setup_rt_frame(env, frame_addr); 4545 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4546 goto give_sigsegv; 4547 } 4548 4549 tswap_siginfo(&frame->info, info); 4550 4551 /* Create the ucontext.
*/ 4552 __put_user(0, &frame->uc.tuc_flags); 4553 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 4554 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4555 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 4556 &frame->uc.tuc_stack.ss_flags); 4557 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4558 save_sigregs(env, &frame->uc.tuc_mcontext); 4559 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4560 __put_user((abi_ulong)set->sig[i], 4561 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4562 } 4563 4564 /* Set up to return from userspace. If provided, use a stub 4565 already in userspace. */ 4566 if (ka->sa_flags & TARGET_SA_RESTORER) { 4567 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 4568 } else { 4569 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 4570 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 4571 (uint16_t *)(frame->retcode)); 4572 } 4573 4574 /* Set up backchain. */ 4575 __put_user(env->regs[15], (abi_ulong *) frame); 4576 4577 /* Set up registers for signal handler */ 4578 env->regs[15] = frame_addr; 4579 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 4580 4581 env->regs[2] = sig; //map_signal(sig); 4582 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 4583 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 4584 return; 4585 4586 give_sigsegv: 4587 force_sigsegv(sig); 4588 } 4589 4590 static int 4591 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 4592 { 4593 int err = 0; 4594 int i; 4595 4596 for (i = 0; i < 16; i++) { 4597 __get_user(env->regs[i], &sc->regs.gprs[i]); 4598 } 4599 4600 __get_user(env->psw.mask, &sc->regs.psw.mask); 4601 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 4602 (unsigned long long)env->psw.addr); 4603 __get_user(env->psw.addr, &sc->regs.psw.addr); 4604 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 4605 4606 for (i = 0; i < 16; i++) { 4607 __get_user(env->aregs[i], &sc->regs.acrs[i]); 4608 } 4609 for (i = 0; i < 16; i++) { 4610 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 4611 } 4612 4613 return err; 4614 } 4615 4616 long do_sigreturn(CPUS390XState *env) 4617 { 4618 sigframe *frame; 4619 abi_ulong frame_addr = env->regs[15]; 4620 target_sigset_t target_set; 4621 sigset_t set; 4622 4623 trace_user_do_sigreturn(env, frame_addr); 4624 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4625 goto badframe; 4626 } 4627 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 4628 4629 target_to_host_sigset_internal(&set, &target_set); 4630 set_sigmask(&set); /* ~_BLOCKABLE? */ 4631 4632 if (restore_sigregs(env, &frame->sregs)) { 4633 goto badframe; 4634 } 4635 4636 unlock_user_struct(frame, frame_addr, 0); 4637 return -TARGET_QEMU_ESIGRETURN; 4638 4639 badframe: 4640 force_sig(TARGET_SIGSEGV); 4641 return -TARGET_QEMU_ESIGRETURN; 4642 } 4643 4644 long do_rt_sigreturn(CPUS390XState *env) 4645 { 4646 rt_sigframe *frame; 4647 abi_ulong frame_addr = env->regs[15]; 4648 sigset_t set; 4649 4650 trace_user_do_rt_sigreturn(env, frame_addr); 4651 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4652 goto badframe; 4653 } 4654 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4655 4656 set_sigmask(&set); /* ~_BLOCKABLE? 
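       (the kernel masks with ~_BLOCKABLE here, i.e. it never lets SIGKILL/SIGSTOP be blocked; sigprocmask() ignores attempts to block those signals anyway, so skipping that step is harmless)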
*/ 4657 4658 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 4659 goto badframe; 4660 } 4661 4662 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 4663 get_sp_from_cpustate(env)) == -EFAULT) { 4664 goto badframe; 4665 } 4666 unlock_user_struct(frame, frame_addr, 0); 4667 return -TARGET_QEMU_ESIGRETURN; 4668 4669 badframe: 4670 unlock_user_struct(frame, frame_addr, 0); 4671 force_sig(TARGET_SIGSEGV); 4672 return -TARGET_QEMU_ESIGRETURN; 4673 } 4674 4675 #elif defined(TARGET_PPC) 4676 4677 /* Size of dummy stack frame allocated when calling signal handler. 4678 See arch/powerpc/include/asm/ptrace.h. */ 4679 #if defined(TARGET_PPC64) 4680 #define SIGNAL_FRAMESIZE 128 4681 #else 4682 #define SIGNAL_FRAMESIZE 64 4683 #endif 4684 4685 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 4686 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 4687 struct target_mcontext { 4688 target_ulong mc_gregs[48]; 4689 /* Includes fpscr. */ 4690 uint64_t mc_fregs[33]; 4691 #if defined(TARGET_PPC64) 4692 /* Pointer to the vector regs */ 4693 target_ulong v_regs; 4694 #else 4695 target_ulong mc_pad[2]; 4696 #endif 4697 /* We need to handle Altivec and SPE at the same time, which no 4698 kernel needs to do. Fortunately, the kernel defines this bit to 4699 be Altivec-register-large all the time, rather than trying to 4700 twiddle it based on the specific platform. */ 4701 union { 4702 /* SPE vector registers. One extra for SPEFSCR. */ 4703 uint32_t spe[33]; 4704 /* Altivec vector registers. The packing of VSCR and VRSAVE 4705 varies depending on whether we're PPC64 or not: PPC64 splits 4706 them apart; PPC32 stuffs them together. 4707 We also need to account for the VSX registers on PPC64 4708 */ 4709 #if defined(TARGET_PPC64) 4710 #define QEMU_NVRREG (34 + 16) 4711 /* On ppc64, this mcontext structure is naturally *unaligned*, 4712 * or rather it is aligned on a 8 bytes boundary but not on 4713 * a 16 bytes one. This pad fixes it up. This is also why the 4714 * vector regs are referenced by the v_regs pointer above so 4715 * any amount of padding can be added here 4716 */ 4717 target_ulong pad; 4718 #else 4719 /* On ppc32, we are already aligned to 16 bytes */ 4720 #define QEMU_NVRREG 33 4721 #endif 4722 /* We cannot use ppc_avr_t here as we do *not* want the implied 4723 * 16-bytes alignment that would result from it. This would have 4724 * the effect of making the whole struct target_mcontext aligned 4725 * which breaks the layout of struct target_ucontext on ppc64. 4726 */ 4727 uint64_t altivec[QEMU_NVRREG][2]; 4728 #undef QEMU_NVRREG 4729 } mc_vregs; 4730 }; 4731 4732 /* See arch/powerpc/include/asm/sigcontext.h. */ 4733 struct target_sigcontext { 4734 target_ulong _unused[4]; 4735 int32_t signal; 4736 #if defined(TARGET_PPC64) 4737 int32_t pad0; 4738 #endif 4739 target_ulong handler; 4740 target_ulong oldmask; 4741 target_ulong regs; /* struct pt_regs __user * */ 4742 #if defined(TARGET_PPC64) 4743 struct target_mcontext mcontext; 4744 #endif 4745 }; 4746 4747 /* Indices for target_mcontext.mc_gregs, below. 4748 See arch/powerpc/include/asm/ptrace.h for details. 
*/ 4749 enum { 4750 TARGET_PT_R0 = 0, 4751 TARGET_PT_R1 = 1, 4752 TARGET_PT_R2 = 2, 4753 TARGET_PT_R3 = 3, 4754 TARGET_PT_R4 = 4, 4755 TARGET_PT_R5 = 5, 4756 TARGET_PT_R6 = 6, 4757 TARGET_PT_R7 = 7, 4758 TARGET_PT_R8 = 8, 4759 TARGET_PT_R9 = 9, 4760 TARGET_PT_R10 = 10, 4761 TARGET_PT_R11 = 11, 4762 TARGET_PT_R12 = 12, 4763 TARGET_PT_R13 = 13, 4764 TARGET_PT_R14 = 14, 4765 TARGET_PT_R15 = 15, 4766 TARGET_PT_R16 = 16, 4767 TARGET_PT_R17 = 17, 4768 TARGET_PT_R18 = 18, 4769 TARGET_PT_R19 = 19, 4770 TARGET_PT_R20 = 20, 4771 TARGET_PT_R21 = 21, 4772 TARGET_PT_R22 = 22, 4773 TARGET_PT_R23 = 23, 4774 TARGET_PT_R24 = 24, 4775 TARGET_PT_R25 = 25, 4776 TARGET_PT_R26 = 26, 4777 TARGET_PT_R27 = 27, 4778 TARGET_PT_R28 = 28, 4779 TARGET_PT_R29 = 29, 4780 TARGET_PT_R30 = 30, 4781 TARGET_PT_R31 = 31, 4782 TARGET_PT_NIP = 32, 4783 TARGET_PT_MSR = 33, 4784 TARGET_PT_ORIG_R3 = 34, 4785 TARGET_PT_CTR = 35, 4786 TARGET_PT_LNK = 36, 4787 TARGET_PT_XER = 37, 4788 TARGET_PT_CCR = 38, 4789 /* Yes, there are two registers with #39. One is 64-bit only. */ 4790 TARGET_PT_MQ = 39, 4791 TARGET_PT_SOFTE = 39, 4792 TARGET_PT_TRAP = 40, 4793 TARGET_PT_DAR = 41, 4794 TARGET_PT_DSISR = 42, 4795 TARGET_PT_RESULT = 43, 4796 TARGET_PT_REGS_COUNT = 44 4797 }; 4798 4799 4800 struct target_ucontext { 4801 target_ulong tuc_flags; 4802 target_ulong tuc_link; /* struct ucontext __user * */ 4803 struct target_sigaltstack tuc_stack; 4804 #if !defined(TARGET_PPC64) 4805 int32_t tuc_pad[7]; 4806 target_ulong tuc_regs; /* struct mcontext __user * 4807 points to uc_mcontext field */ 4808 #endif 4809 target_sigset_t tuc_sigmask; 4810 #if defined(TARGET_PPC64) 4811 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 4812 struct target_sigcontext tuc_sigcontext; 4813 #else 4814 int32_t tuc_maskext[30]; 4815 int32_t tuc_pad2[3]; 4816 struct target_mcontext tuc_mcontext; 4817 #endif 4818 }; 4819 4820 /* See arch/powerpc/kernel/signal_32.c. */ 4821 struct target_sigframe { 4822 struct target_sigcontext sctx; 4823 struct target_mcontext mctx; 4824 int32_t abigap[56]; 4825 }; 4826 4827 #if defined(TARGET_PPC64) 4828 4829 #define TARGET_TRAMP_SIZE 6 4830 4831 struct target_rt_sigframe { 4832 /* sys_rt_sigreturn requires the ucontext be the first field */ 4833 struct target_ucontext uc; 4834 target_ulong _unused[2]; 4835 uint32_t trampoline[TARGET_TRAMP_SIZE]; 4836 target_ulong pinfo; /* struct siginfo __user * */ 4837 target_ulong puc; /* void __user * */ 4838 struct target_siginfo info; 4839 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 4840 char abigap[288]; 4841 } __attribute__((aligned(16))); 4842 4843 #else 4844 4845 struct target_rt_sigframe { 4846 struct target_siginfo info; 4847 struct target_ucontext uc; 4848 int32_t abigap[56]; 4849 }; 4850 4851 #endif 4852 4853 #if defined(TARGET_PPC64) 4854 4855 struct target_func_ptr { 4856 target_ulong entry; 4857 target_ulong toc; 4858 }; 4859 4860 #endif 4861 4862 /* We use the mc_pad field for the signal return trampoline. */ 4863 #define tramp mc_pad 4864 4865 /* See arch/powerpc/kernel/signal.c. 
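   The frame is carved out below the current r1 (or below the top of the alternate signal stack) and kept 16-byte aligned.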
*/ 4866 static target_ulong get_sigframe(struct target_sigaction *ka, 4867 CPUPPCState *env, 4868 int frame_size) 4869 { 4870 target_ulong oldsp; 4871 4872 oldsp = env->gpr[1]; 4873 4874 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 4875 (sas_ss_flags(oldsp) == 0)) { 4876 oldsp = (target_sigaltstack_used.ss_sp 4877 + target_sigaltstack_used.ss_size); 4878 } 4879 4880 return (oldsp - frame_size) & ~0xFUL; 4881 } 4882 4883 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \ 4884 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN))) 4885 #define PPC_VEC_HI 0 4886 #define PPC_VEC_LO 1 4887 #else 4888 #define PPC_VEC_HI 1 4889 #define PPC_VEC_LO 0 4890 #endif 4891 4892 4893 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 4894 { 4895 target_ulong msr = env->msr; 4896 int i; 4897 target_ulong ccr = 0; 4898 4899 /* In general, the kernel attempts to be intelligent about what it 4900 needs to save for Altivec/FP/SPE registers. We don't care that 4901 much, so we just go ahead and save everything. */ 4902 4903 /* Save general registers. */ 4904 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4905 __put_user(env->gpr[i], &frame->mc_gregs[i]); 4906 } 4907 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 4908 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 4909 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 4910 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 4911 4912 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 4913 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 4914 } 4915 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 4916 4917 /* Save Altivec registers if necessary. */ 4918 if (env->insns_flags & PPC_ALTIVEC) { 4919 uint32_t *vrsave; 4920 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 4921 ppc_avr_t *avr = &env->avr[i]; 4922 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i]; 4923 4924 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]); 4925 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]); 4926 } 4927 /* Set MSR_VR in the saved MSR value to indicate that 4928 frame->mc_vregs contains valid data. */ 4929 msr |= MSR_VR; 4930 #if defined(TARGET_PPC64) 4931 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33]; 4932 /* 64-bit needs to put a pointer to the vectors in the frame */ 4933 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs); 4934 #else 4935 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32]; 4936 #endif 4937 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave); 4938 } 4939 4940 /* Save VSX second halves */ 4941 if (env->insns_flags2 & PPC2_VSX) { 4942 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34]; 4943 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) { 4944 __put_user(env->vsr[i], &vsregs[i]); 4945 } 4946 } 4947 4948 /* Save floating point registers. */ 4949 if (env->insns_flags & PPC_FLOAT) { 4950 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 4951 __put_user(env->fpr[i], &frame->mc_fregs[i]); 4952 } 4953 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 4954 } 4955 4956 /* Save SPE registers. The kernel only saves the high half. */ 4957 if (env->insns_flags & PPC_SPE) { 4958 #if defined(TARGET_PPC64) 4959 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 4960 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 4961 } 4962 #else 4963 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 4964 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 4965 } 4966 #endif 4967 /* Set MSR_SPE in the saved MSR value to indicate that 4968 frame->mc_vregs contains valid data. 
*/ 4969 msr |= MSR_SPE; 4970 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 4971 } 4972 4973 /* Store MSR. */ 4974 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 4975 } 4976 4977 static void encode_trampoline(int sigret, uint32_t *tramp) 4978 { 4979 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 4980 if (sigret) { 4981 __put_user(0x38000000 | sigret, &tramp[0]); 4982 __put_user(0x44000002, &tramp[1]); 4983 } 4984 } 4985 4986 static void restore_user_regs(CPUPPCState *env, 4987 struct target_mcontext *frame, int sig) 4988 { 4989 target_ulong save_r2 = 0; 4990 target_ulong msr; 4991 target_ulong ccr; 4992 4993 int i; 4994 4995 if (!sig) { 4996 save_r2 = env->gpr[2]; 4997 } 4998 4999 /* Restore general registers. */ 5000 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5001 __get_user(env->gpr[i], &frame->mc_gregs[i]); 5002 } 5003 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 5004 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 5005 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 5006 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 5007 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 5008 5009 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 5010 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 5011 } 5012 5013 if (!sig) { 5014 env->gpr[2] = save_r2; 5015 } 5016 /* Restore MSR. */ 5017 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 5018 5019 /* If doing signal return, restore the previous little-endian mode. */ 5020 if (sig) 5021 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 5022 5023 /* Restore Altivec registers if necessary. */ 5024 if (env->insns_flags & PPC_ALTIVEC) { 5025 ppc_avr_t *v_regs; 5026 uint32_t *vrsave; 5027 #if defined(TARGET_PPC64) 5028 uint64_t v_addr; 5029 /* 64-bit needs to recover the pointer to the vectors from the frame */ 5030 __get_user(v_addr, &frame->v_regs); 5031 v_regs = g2h(v_addr); 5032 #else 5033 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec; 5034 #endif 5035 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 5036 ppc_avr_t *avr = &env->avr[i]; 5037 ppc_avr_t *vreg = &v_regs[i]; 5038 5039 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]); 5040 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]); 5041 } 5042 /* Set MSR_VEC in the saved MSR value to indicate that 5043 frame->mc_vregs contains valid data. */ 5044 #if defined(TARGET_PPC64) 5045 vrsave = (uint32_t *)&v_regs[33]; 5046 #else 5047 vrsave = (uint32_t *)&v_regs[32]; 5048 #endif 5049 __get_user(env->spr[SPR_VRSAVE], vrsave); 5050 } 5051 5052 /* Restore VSX second halves */ 5053 if (env->insns_flags2 & PPC2_VSX) { 5054 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34]; 5055 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) { 5056 __get_user(env->vsr[i], &vsregs[i]); 5057 } 5058 } 5059 5060 /* Restore floating point registers. */ 5061 if (env->insns_flags & PPC_FLOAT) { 5062 uint64_t fpscr; 5063 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 5064 __get_user(env->fpr[i], &frame->mc_fregs[i]); 5065 } 5066 __get_user(fpscr, &frame->mc_fregs[32]); 5067 env->fpscr = (uint32_t) fpscr; 5068 } 5069 5070 /* Save SPE registers. The kernel only saves the high half. 
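   (This is the restore path: the saved high halves are merged back into the 64-bit GPRs, or into gprh on 32-bit.)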
*/ 5071 if (env->insns_flags & PPC_SPE) { 5072 #if defined(TARGET_PPC64) 5073 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5074 uint32_t hi; 5075 5076 __get_user(hi, &frame->mc_vregs.spe[i]); 5077 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 5078 } 5079 #else 5080 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 5081 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 5082 } 5083 #endif 5084 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 5085 } 5086 } 5087 5088 #if !defined(TARGET_PPC64) 5089 static void setup_frame(int sig, struct target_sigaction *ka, 5090 target_sigset_t *set, CPUPPCState *env) 5091 { 5092 struct target_sigframe *frame; 5093 struct target_sigcontext *sc; 5094 target_ulong frame_addr, newsp; 5095 int err = 0; 5096 5097 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5098 trace_user_setup_frame(env, frame_addr); 5099 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 5100 goto sigsegv; 5101 sc = &frame->sctx; 5102 5103 __put_user(ka->_sa_handler, &sc->handler); 5104 __put_user(set->sig[0], &sc->oldmask); 5105 __put_user(set->sig[1], &sc->_unused[3]); 5106 __put_user(h2g(&frame->mctx), &sc->regs); 5107 __put_user(sig, &sc->signal); 5108 5109 /* Save user regs. */ 5110 save_user_regs(env, &frame->mctx); 5111 5112 /* Construct the trampoline code on the stack. */ 5113 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 5114 5115 /* The kernel checks for the presence of a VDSO here. We don't 5116 emulate a vdso, so use a sigreturn system call. */ 5117 env->lr = (target_ulong) h2g(frame->mctx.tramp); 5118 5119 /* Turn off all fp exceptions. */ 5120 env->fpscr = 0; 5121 5122 /* Create a stack frame for the caller of the handler. */ 5123 newsp = frame_addr - SIGNAL_FRAMESIZE; 5124 err |= put_user(env->gpr[1], newsp, target_ulong); 5125 5126 if (err) 5127 goto sigsegv; 5128 5129 /* Set up registers for signal handler. */ 5130 env->gpr[1] = newsp; 5131 env->gpr[3] = sig; 5132 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 5133 5134 env->nip = (target_ulong) ka->_sa_handler; 5135 5136 /* Signal handlers are entered in big-endian mode. 
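   MSR_LE is cleared below; restore_user_regs() puts the interrupted context's endianness back on sigreturn.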
*/ 5137 env->msr &= ~(1ull << MSR_LE); 5138 5139 unlock_user_struct(frame, frame_addr, 1); 5140 return; 5141 5142 sigsegv: 5143 unlock_user_struct(frame, frame_addr, 1); 5144 force_sigsegv(sig); 5145 } 5146 #endif /* !defined(TARGET_PPC64) */ 5147 5148 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5149 target_siginfo_t *info, 5150 target_sigset_t *set, CPUPPCState *env) 5151 { 5152 struct target_rt_sigframe *rt_sf; 5153 uint32_t *trampptr = 0; 5154 struct target_mcontext *mctx = 0; 5155 target_ulong rt_sf_addr, newsp = 0; 5156 int i, err = 0; 5157 #if defined(TARGET_PPC64) 5158 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 5159 #endif 5160 5161 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 5162 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 5163 goto sigsegv; 5164 5165 tswap_siginfo(&rt_sf->info, info); 5166 5167 __put_user(0, &rt_sf->uc.tuc_flags); 5168 __put_user(0, &rt_sf->uc.tuc_link); 5169 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 5170 &rt_sf->uc.tuc_stack.ss_sp); 5171 __put_user(sas_ss_flags(env->gpr[1]), 5172 &rt_sf->uc.tuc_stack.ss_flags); 5173 __put_user(target_sigaltstack_used.ss_size, 5174 &rt_sf->uc.tuc_stack.ss_size); 5175 #if !defined(TARGET_PPC64) 5176 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 5177 &rt_sf->uc.tuc_regs); 5178 #endif 5179 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5180 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 5181 } 5182 5183 #if defined(TARGET_PPC64) 5184 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 5185 trampptr = &rt_sf->trampoline[0]; 5186 #else 5187 mctx = &rt_sf->uc.tuc_mcontext; 5188 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 5189 #endif 5190 5191 save_user_regs(env, mctx); 5192 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 5193 5194 /* The kernel checks for the presence of a VDSO here. We don't 5195 emulate a vdso, so use a sigreturn system call. */ 5196 env->lr = (target_ulong) h2g(trampptr); 5197 5198 /* Turn off all fp exceptions. */ 5199 env->fpscr = 0; 5200 5201 /* Create a stack frame for the caller of the handler. */ 5202 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 5203 err |= put_user(env->gpr[1], newsp, target_ulong); 5204 5205 if (err) 5206 goto sigsegv; 5207 5208 /* Set up registers for signal handler. */ 5209 env->gpr[1] = newsp; 5210 env->gpr[3] = (target_ulong) sig; 5211 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 5212 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 5213 env->gpr[6] = (target_ulong) h2g(rt_sf); 5214 5215 #if defined(TARGET_PPC64) 5216 if (get_ppc64_abi(image) < 2) { 5217 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 5218 struct target_func_ptr *handler = 5219 (struct target_func_ptr *)g2h(ka->_sa_handler); 5220 env->nip = tswapl(handler->entry); 5221 env->gpr[2] = tswapl(handler->toc); 5222 } else { 5223 /* ELFv2 PPC64 function pointers are entry points, but R12 5224 * must also be set */ 5225 env->nip = tswapl((target_ulong) ka->_sa_handler); 5226 env->gpr[12] = env->nip; 5227 } 5228 #else 5229 env->nip = (target_ulong) ka->_sa_handler; 5230 #endif 5231 5232 /* Signal handlers are entered in big-endian mode. 
*/ 5233 env->msr &= ~(1ull << MSR_LE); 5234 5235 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5236 return; 5237 5238 sigsegv: 5239 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5240 force_sigsegv(sig); 5241 5242 } 5243 5244 #if !defined(TARGET_PPC64) 5245 long do_sigreturn(CPUPPCState *env) 5246 { 5247 struct target_sigcontext *sc = NULL; 5248 struct target_mcontext *sr = NULL; 5249 target_ulong sr_addr = 0, sc_addr; 5250 sigset_t blocked; 5251 target_sigset_t set; 5252 5253 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 5254 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 5255 goto sigsegv; 5256 5257 #if defined(TARGET_PPC64) 5258 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 5259 #else 5260 __get_user(set.sig[0], &sc->oldmask); 5261 __get_user(set.sig[1], &sc->_unused[3]); 5262 #endif 5263 target_to_host_sigset_internal(&blocked, &set); 5264 set_sigmask(&blocked); 5265 5266 __get_user(sr_addr, &sc->regs); 5267 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 5268 goto sigsegv; 5269 restore_user_regs(env, sr, 1); 5270 5271 unlock_user_struct(sr, sr_addr, 1); 5272 unlock_user_struct(sc, sc_addr, 1); 5273 return -TARGET_QEMU_ESIGRETURN; 5274 5275 sigsegv: 5276 unlock_user_struct(sr, sr_addr, 1); 5277 unlock_user_struct(sc, sc_addr, 1); 5278 force_sig(TARGET_SIGSEGV); 5279 return -TARGET_QEMU_ESIGRETURN; 5280 } 5281 #endif /* !defined(TARGET_PPC64) */ 5282 5283 /* See arch/powerpc/kernel/signal_32.c. */ 5284 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 5285 { 5286 struct target_mcontext *mcp; 5287 target_ulong mcp_addr; 5288 sigset_t blocked; 5289 target_sigset_t set; 5290 5291 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 5292 sizeof (set))) 5293 return 1; 5294 5295 #if defined(TARGET_PPC64) 5296 mcp_addr = h2g(ucp) + 5297 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 5298 #else 5299 __get_user(mcp_addr, &ucp->tuc_regs); 5300 #endif 5301 5302 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 5303 return 1; 5304 5305 target_to_host_sigset_internal(&blocked, &set); 5306 set_sigmask(&blocked); 5307 restore_user_regs(env, mcp, sig); 5308 5309 unlock_user_struct(mcp, mcp_addr, 1); 5310 return 0; 5311 } 5312 5313 long do_rt_sigreturn(CPUPPCState *env) 5314 { 5315 struct target_rt_sigframe *rt_sf = NULL; 5316 target_ulong rt_sf_addr; 5317 5318 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5319 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5320 goto sigsegv; 5321 5322 if (do_setcontext(&rt_sf->uc, env, 1)) 5323 goto sigsegv; 5324 5325 do_sigaltstack(rt_sf_addr 5326 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5327 0, env->gpr[1]); 5328 5329 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5330 return -TARGET_QEMU_ESIGRETURN; 5331 5332 sigsegv: 5333 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5334 force_sig(TARGET_SIGSEGV); 5335 return -TARGET_QEMU_ESIGRETURN; 5336 } 5337 5338 #elif defined(TARGET_M68K) 5339 5340 struct target_sigcontext { 5341 abi_ulong sc_mask; 5342 abi_ulong sc_usp; 5343 abi_ulong sc_d0; 5344 abi_ulong sc_d1; 5345 abi_ulong sc_a0; 5346 abi_ulong sc_a1; 5347 unsigned short sc_sr; 5348 abi_ulong sc_pc; 5349 }; 5350 5351 struct target_sigframe 5352 { 5353 abi_ulong pretcode; 5354 int sig; 5355 int code; 5356 abi_ulong psc; 5357 char retcode[8]; 5358 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5359 struct target_sigcontext sc; 5360 }; 5361 5362 typedef int target_greg_t; 5363 #define TARGET_NGREG 18 5364 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5365 5366 
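/* FP state as the m68k kernel lays it out: three control registers (FPCR, FPSR, FPIAR) followed by eight 96-bit data registers, each stored as three 32-bit words. */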
typedef struct target_fpregset { 5367 int f_fpcntl[3]; 5368 int f_fpregs[8*3]; 5369 } target_fpregset_t; 5370 5371 struct target_mcontext { 5372 int version; 5373 target_gregset_t gregs; 5374 target_fpregset_t fpregs; 5375 }; 5376 5377 #define TARGET_MCONTEXT_VERSION 2 5378 5379 struct target_ucontext { 5380 abi_ulong tuc_flags; 5381 abi_ulong tuc_link; 5382 target_stack_t tuc_stack; 5383 struct target_mcontext tuc_mcontext; 5384 abi_long tuc_filler[80]; 5385 target_sigset_t tuc_sigmask; 5386 }; 5387 5388 struct target_rt_sigframe 5389 { 5390 abi_ulong pretcode; 5391 int sig; 5392 abi_ulong pinfo; 5393 abi_ulong puc; 5394 char retcode[8]; 5395 struct target_siginfo info; 5396 struct target_ucontext uc; 5397 }; 5398 5399 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5400 abi_ulong mask) 5401 { 5402 __put_user(mask, &sc->sc_mask); 5403 __put_user(env->aregs[7], &sc->sc_usp); 5404 __put_user(env->dregs[0], &sc->sc_d0); 5405 __put_user(env->dregs[1], &sc->sc_d1); 5406 __put_user(env->aregs[0], &sc->sc_a0); 5407 __put_user(env->aregs[1], &sc->sc_a1); 5408 __put_user(env->sr, &sc->sc_sr); 5409 __put_user(env->pc, &sc->sc_pc); 5410 } 5411 5412 static void 5413 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5414 { 5415 int temp; 5416 5417 __get_user(env->aregs[7], &sc->sc_usp); 5418 __get_user(env->dregs[0], &sc->sc_d0); 5419 __get_user(env->dregs[1], &sc->sc_d1); 5420 __get_user(env->aregs[0], &sc->sc_a0); 5421 __get_user(env->aregs[1], &sc->sc_a1); 5422 __get_user(env->pc, &sc->sc_pc); 5423 __get_user(temp, &sc->sc_sr); 5424 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5425 } 5426 5427 /* 5428 * Determine which stack to use.. 5429 */ 5430 static inline abi_ulong 5431 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5432 size_t frame_size) 5433 { 5434 unsigned long sp; 5435 5436 sp = regs->aregs[7]; 5437 5438 /* This is the X/Open sanctioned signal stack switching. */ 5439 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5440 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5441 } 5442 5443 return ((sp - frame_size) & -8UL); 5444 } 5445 5446 static void setup_frame(int sig, struct target_sigaction *ka, 5447 target_sigset_t *set, CPUM68KState *env) 5448 { 5449 struct target_sigframe *frame; 5450 abi_ulong frame_addr; 5451 abi_ulong retcode_addr; 5452 abi_ulong sc_addr; 5453 int i; 5454 5455 frame_addr = get_sigframe(ka, env, sizeof *frame); 5456 trace_user_setup_frame(env, frame_addr); 5457 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5458 goto give_sigsegv; 5459 } 5460 5461 __put_user(sig, &frame->sig); 5462 5463 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5464 __put_user(sc_addr, &frame->psc); 5465 5466 setup_sigcontext(&frame->sc, env, set->sig[0]); 5467 5468 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5469 __put_user(set->sig[i], &frame->extramask[i - 1]); 5470 } 5471 5472 /* Set up to return from userspace. 
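   The trampoline written below packs two instructions into one 32-bit store: moveq #TARGET_NR_sigreturn,%d0 in the high halfword and trap #0 (0x4e40) in the low halfword.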
*/ 5473 5474 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5475 __put_user(retcode_addr, &frame->pretcode); 5476 5477 /* moveq #,d0; trap #0 */ 5478 5479 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5480 (uint32_t *)(frame->retcode)); 5481 5482 /* Set up to return from userspace */ 5483 5484 env->aregs[7] = frame_addr; 5485 env->pc = ka->_sa_handler; 5486 5487 unlock_user_struct(frame, frame_addr, 1); 5488 return; 5489 5490 give_sigsegv: 5491 force_sigsegv(sig); 5492 } 5493 5494 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 5495 CPUM68KState *env) 5496 { 5497 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5498 5499 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 5500 __put_user(env->dregs[0], &gregs[0]); 5501 __put_user(env->dregs[1], &gregs[1]); 5502 __put_user(env->dregs[2], &gregs[2]); 5503 __put_user(env->dregs[3], &gregs[3]); 5504 __put_user(env->dregs[4], &gregs[4]); 5505 __put_user(env->dregs[5], &gregs[5]); 5506 __put_user(env->dregs[6], &gregs[6]); 5507 __put_user(env->dregs[7], &gregs[7]); 5508 __put_user(env->aregs[0], &gregs[8]); 5509 __put_user(env->aregs[1], &gregs[9]); 5510 __put_user(env->aregs[2], &gregs[10]); 5511 __put_user(env->aregs[3], &gregs[11]); 5512 __put_user(env->aregs[4], &gregs[12]); 5513 __put_user(env->aregs[5], &gregs[13]); 5514 __put_user(env->aregs[6], &gregs[14]); 5515 __put_user(env->aregs[7], &gregs[15]); 5516 __put_user(env->pc, &gregs[16]); 5517 __put_user(env->sr, &gregs[17]); 5518 5519 return 0; 5520 } 5521 5522 static inline int target_rt_restore_ucontext(CPUM68KState *env, 5523 struct target_ucontext *uc) 5524 { 5525 int temp; 5526 target_greg_t *gregs = uc->tuc_mcontext.gregs; 5527 5528 __get_user(temp, &uc->tuc_mcontext.version); 5529 if (temp != TARGET_MCONTEXT_VERSION) 5530 goto badframe; 5531 5532 /* restore passed registers */ 5533 __get_user(env->dregs[0], &gregs[0]); 5534 __get_user(env->dregs[1], &gregs[1]); 5535 __get_user(env->dregs[2], &gregs[2]); 5536 __get_user(env->dregs[3], &gregs[3]); 5537 __get_user(env->dregs[4], &gregs[4]); 5538 __get_user(env->dregs[5], &gregs[5]); 5539 __get_user(env->dregs[6], &gregs[6]); 5540 __get_user(env->dregs[7], &gregs[7]); 5541 __get_user(env->aregs[0], &gregs[8]); 5542 __get_user(env->aregs[1], &gregs[9]); 5543 __get_user(env->aregs[2], &gregs[10]); 5544 __get_user(env->aregs[3], &gregs[11]); 5545 __get_user(env->aregs[4], &gregs[12]); 5546 __get_user(env->aregs[5], &gregs[13]); 5547 __get_user(env->aregs[6], &gregs[14]); 5548 __get_user(env->aregs[7], &gregs[15]); 5549 __get_user(env->pc, &gregs[16]); 5550 __get_user(temp, &gregs[17]); 5551 env->sr = (env->sr & 0xff00) | (temp & 0xff); 5552 5553 return 0; 5554 5555 badframe: 5556 return 1; 5557 } 5558 5559 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5560 target_siginfo_t *info, 5561 target_sigset_t *set, CPUM68KState *env) 5562 { 5563 struct target_rt_sigframe *frame; 5564 abi_ulong frame_addr; 5565 abi_ulong retcode_addr; 5566 abi_ulong info_addr; 5567 abi_ulong uc_addr; 5568 int err = 0; 5569 int i; 5570 5571 frame_addr = get_sigframe(ka, env, sizeof *frame); 5572 trace_user_setup_rt_frame(env, frame_addr); 5573 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5574 goto give_sigsegv; 5575 } 5576 5577 __put_user(sig, &frame->sig); 5578 5579 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 5580 __put_user(info_addr, &frame->pinfo); 5581 5582 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 5583 
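    /* sig, pinfo and puc at the top of the new frame are the three arguments the rt signal handler reads off the stack. */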
    __put_user(uc_addr, &frame->puc);

    tswap_siginfo(&frame->info, info);

    /* Create the ucontext */

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->aregs[7]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    err |= target_rt_setup_ucontext(&frame->uc, env);

    if (err)
        goto give_sigsegv;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  */

    retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    __put_user(retcode_addr, &frame->pretcode);

    /* moveq #,d0; notb d0; trap #0 */

    __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
               (uint32_t *)(frame->retcode + 0));
    __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));

    if (err)
        goto give_sigsegv;

    /* Set up to return from userspace */

    env->aregs[7] = frame_addr;
    env->pc = ka->_sa_handler;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}

long do_sigreturn(CPUM68KState *env)
{
    struct target_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* set blocked signals */

    __get_user(target_set.sig[0], &frame->sc.sc_mask);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */

    restore_sigcontext(env, &frame->sc);

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_rt_sigreturn(CPUM68KState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr = env->aregs[7] - 4;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    /* restore the signal mask saved by setup_rt_frame() */
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    /* restore registers */

    if (target_rt_restore_ucontext(env, &frame->uc))
        goto badframe;

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_ALPHA)

struct target_sigcontext {
    abi_long sc_onstack;
    abi_long sc_mask;
    abi_long sc_pc;
    abi_long sc_ps;
    abi_long sc_regs[32];
    abi_long sc_ownedfp;
    abi_long sc_fpregs[32];
    abi_ulong sc_fpcr;
    abi_ulong sc_fp_control;
    abi_ulong
sc_reserved1; 5714 abi_ulong sc_reserved2; 5715 abi_ulong sc_ssize; 5716 abi_ulong sc_sbase; 5717 abi_ulong sc_traparg_a0; 5718 abi_ulong sc_traparg_a1; 5719 abi_ulong sc_traparg_a2; 5720 abi_ulong sc_fp_trap_pc; 5721 abi_ulong sc_fp_trigger_sum; 5722 abi_ulong sc_fp_trigger_inst; 5723 }; 5724 5725 struct target_ucontext { 5726 abi_ulong tuc_flags; 5727 abi_ulong tuc_link; 5728 abi_ulong tuc_osf_sigmask; 5729 target_stack_t tuc_stack; 5730 struct target_sigcontext tuc_mcontext; 5731 target_sigset_t tuc_sigmask; 5732 }; 5733 5734 struct target_sigframe { 5735 struct target_sigcontext sc; 5736 unsigned int retcode[3]; 5737 }; 5738 5739 struct target_rt_sigframe { 5740 target_siginfo_t info; 5741 struct target_ucontext uc; 5742 unsigned int retcode[3]; 5743 }; 5744 5745 #define INSN_MOV_R30_R16 0x47fe0410 5746 #define INSN_LDI_R0 0x201f0000 5747 #define INSN_CALLSYS 0x00000083 5748 5749 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 5750 abi_ulong frame_addr, target_sigset_t *set) 5751 { 5752 int i; 5753 5754 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 5755 __put_user(set->sig[0], &sc->sc_mask); 5756 __put_user(env->pc, &sc->sc_pc); 5757 __put_user(8, &sc->sc_ps); 5758 5759 for (i = 0; i < 31; ++i) { 5760 __put_user(env->ir[i], &sc->sc_regs[i]); 5761 } 5762 __put_user(0, &sc->sc_regs[31]); 5763 5764 for (i = 0; i < 31; ++i) { 5765 __put_user(env->fir[i], &sc->sc_fpregs[i]); 5766 } 5767 __put_user(0, &sc->sc_fpregs[31]); 5768 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 5769 5770 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 5771 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 5772 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 5773 } 5774 5775 static void restore_sigcontext(CPUAlphaState *env, 5776 struct target_sigcontext *sc) 5777 { 5778 uint64_t fpcr; 5779 int i; 5780 5781 __get_user(env->pc, &sc->sc_pc); 5782 5783 for (i = 0; i < 31; ++i) { 5784 __get_user(env->ir[i], &sc->sc_regs[i]); 5785 } 5786 for (i = 0; i < 31; ++i) { 5787 __get_user(env->fir[i], &sc->sc_fpregs[i]); 5788 } 5789 5790 __get_user(fpcr, &sc->sc_fpcr); 5791 cpu_alpha_store_fpcr(env, fpcr); 5792 } 5793 5794 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 5795 CPUAlphaState *env, 5796 unsigned long framesize) 5797 { 5798 abi_ulong sp = env->ir[IR_SP]; 5799 5800 /* This is the X/Open sanctioned signal stack switching. 
*/ 5801 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 5802 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5803 } 5804 return (sp - framesize) & -32; 5805 } 5806 5807 static void setup_frame(int sig, struct target_sigaction *ka, 5808 target_sigset_t *set, CPUAlphaState *env) 5809 { 5810 abi_ulong frame_addr, r26; 5811 struct target_sigframe *frame; 5812 int err = 0; 5813 5814 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5815 trace_user_setup_frame(env, frame_addr); 5816 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5817 goto give_sigsegv; 5818 } 5819 5820 setup_sigcontext(&frame->sc, env, frame_addr, set); 5821 5822 if (ka->sa_restorer) { 5823 r26 = ka->sa_restorer; 5824 } else { 5825 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5826 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 5827 &frame->retcode[1]); 5828 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5829 /* imb() */ 5830 r26 = frame_addr; 5831 } 5832 5833 unlock_user_struct(frame, frame_addr, 1); 5834 5835 if (err) { 5836 give_sigsegv: 5837 force_sigsegv(sig); 5838 return; 5839 } 5840 5841 env->ir[IR_RA] = r26; 5842 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5843 env->ir[IR_A0] = sig; 5844 env->ir[IR_A1] = 0; 5845 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 5846 env->ir[IR_SP] = frame_addr; 5847 } 5848 5849 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5850 target_siginfo_t *info, 5851 target_sigset_t *set, CPUAlphaState *env) 5852 { 5853 abi_ulong frame_addr, r26; 5854 struct target_rt_sigframe *frame; 5855 int i, err = 0; 5856 5857 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5858 trace_user_setup_rt_frame(env, frame_addr); 5859 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5860 goto give_sigsegv; 5861 } 5862 5863 tswap_siginfo(&frame->info, info); 5864 5865 __put_user(0, &frame->uc.tuc_flags); 5866 __put_user(0, &frame->uc.tuc_link); 5867 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 5868 __put_user(target_sigaltstack_used.ss_sp, 5869 &frame->uc.tuc_stack.ss_sp); 5870 __put_user(sas_ss_flags(env->ir[IR_SP]), 5871 &frame->uc.tuc_stack.ss_flags); 5872 __put_user(target_sigaltstack_used.ss_size, 5873 &frame->uc.tuc_stack.ss_size); 5874 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 5875 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 5876 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 5877 } 5878 5879 if (ka->sa_restorer) { 5880 r26 = ka->sa_restorer; 5881 } else { 5882 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 5883 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 5884 &frame->retcode[1]); 5885 __put_user(INSN_CALLSYS, &frame->retcode[2]); 5886 /* imb(); */ 5887 r26 = frame_addr; 5888 } 5889 5890 if (err) { 5891 give_sigsegv: 5892 force_sigsegv(sig); 5893 return; 5894 } 5895 5896 env->ir[IR_RA] = r26; 5897 env->ir[IR_PV] = env->pc = ka->_sa_handler; 5898 env->ir[IR_A0] = sig; 5899 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 5900 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 5901 env->ir[IR_SP] = frame_addr; 5902 } 5903 5904 long do_sigreturn(CPUAlphaState *env) 5905 { 5906 struct target_sigcontext *sc; 5907 abi_ulong sc_addr = env->ir[IR_A0]; 5908 target_sigset_t target_set; 5909 sigset_t set; 5910 5911 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 5912 goto badframe; 5913 } 5914 5915 target_sigemptyset(&target_set); 5916 __get_user(target_set.sig[0], &sc->sc_mask); 5917 5918 
target_to_host_sigset_internal(&set, &target_set); 5919 set_sigmask(&set); 5920 5921 restore_sigcontext(env, sc); 5922 unlock_user_struct(sc, sc_addr, 0); 5923 return -TARGET_QEMU_ESIGRETURN; 5924 5925 badframe: 5926 force_sig(TARGET_SIGSEGV); 5927 return -TARGET_QEMU_ESIGRETURN; 5928 } 5929 5930 long do_rt_sigreturn(CPUAlphaState *env) 5931 { 5932 abi_ulong frame_addr = env->ir[IR_A0]; 5933 struct target_rt_sigframe *frame; 5934 sigset_t set; 5935 5936 trace_user_do_rt_sigreturn(env, frame_addr); 5937 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5938 goto badframe; 5939 } 5940 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5941 set_sigmask(&set); 5942 5943 restore_sigcontext(env, &frame->uc.tuc_mcontext); 5944 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 5945 uc.tuc_stack), 5946 0, env->ir[IR_SP]) == -EFAULT) { 5947 goto badframe; 5948 } 5949 5950 unlock_user_struct(frame, frame_addr, 0); 5951 return -TARGET_QEMU_ESIGRETURN; 5952 5953 5954 badframe: 5955 unlock_user_struct(frame, frame_addr, 0); 5956 force_sig(TARGET_SIGSEGV); 5957 return -TARGET_QEMU_ESIGRETURN; 5958 } 5959 5960 #elif defined(TARGET_TILEGX) 5961 5962 struct target_sigcontext { 5963 union { 5964 /* General-purpose registers. */ 5965 abi_ulong gregs[56]; 5966 struct { 5967 abi_ulong __gregs[53]; 5968 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 5969 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 5970 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 5971 }; 5972 }; 5973 abi_ulong pc; /* Program counter. */ 5974 abi_ulong ics; /* In Interrupt Critical Section? */ 5975 abi_ulong faultnum; /* Fault number. */ 5976 abi_ulong pad[5]; 5977 }; 5978 5979 struct target_ucontext { 5980 abi_ulong tuc_flags; 5981 abi_ulong tuc_link; 5982 target_stack_t tuc_stack; 5983 struct target_sigcontext tuc_mcontext; 5984 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 5985 }; 5986 5987 struct target_rt_sigframe { 5988 unsigned char save_area[16]; /* caller save area */ 5989 struct target_siginfo info; 5990 struct target_ucontext uc; 5991 abi_ulong retcode[2]; 5992 }; 5993 5994 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 5995 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 5996 5997 5998 static void setup_sigcontext(struct target_sigcontext *sc, 5999 CPUArchState *env, int signo) 6000 { 6001 int i; 6002 6003 for (i = 0; i < TILEGX_R_COUNT; ++i) { 6004 __put_user(env->regs[i], &sc->gregs[i]); 6005 } 6006 6007 __put_user(env->pc, &sc->pc); 6008 __put_user(0, &sc->ics); 6009 __put_user(signo, &sc->faultnum); 6010 } 6011 6012 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 6013 { 6014 int i; 6015 6016 for (i = 0; i < TILEGX_R_COUNT; ++i) { 6017 __get_user(env->regs[i], &sc->gregs[i]); 6018 } 6019 6020 __get_user(env->pc, &sc->pc); 6021 } 6022 6023 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 6024 size_t frame_size) 6025 { 6026 unsigned long sp = env->regs[TILEGX_R_SP]; 6027 6028 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 6029 return -1UL; 6030 } 6031 6032 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 6033 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 6034 } 6035 6036 sp -= frame_size; 6037 sp &= -16UL; 6038 return sp; 6039 } 6040 6041 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6042 target_siginfo_t *info, 6043 target_sigset_t *set, CPUArchState *env) 6044 { 6045 abi_ulong frame_addr; 6046 struct target_rt_sigframe 
*frame;
    unsigned long restorer;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Always write at least the signal number for the stack backtracer. */
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        /* At sigreturn time, restore the callee-save registers too. */
        tswap_siginfo(&frame->info, info);
        /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
    } else {
        __put_user(info->si_signo, &frame->info.si_signo);
    }

    /* Create the ucontext. */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        restorer = (unsigned long) ka->sa_restorer;
    } else {
        __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
        __put_user(INSN_SWINT1, &frame->retcode[1]);
        restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
    }
    env->pc = (unsigned long) ka->_sa_handler;
    /* pass guest addresses (not host pointers) to the guest registers */
    env->regs[TILEGX_R_SP] = frame_addr;
    env->regs[TILEGX_R_LR] = restorer;
    env->regs[0] = (unsigned long) sig;
    env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
    env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUTLGState *env)
{
    abi_ulong frame_addr = env->regs[TILEGX_R_SP];
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->regs[TILEGX_R_SP]) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#elif defined(TARGET_HPPA)

struct target_sigcontext {
    abi_ulong sc_flags;
    abi_ulong sc_gr[32];
    uint64_t sc_fr[32];
    abi_ulong sc_iasq[2];
    abi_ulong sc_iaoq[2];
    abi_ulong sc_sar;
};

struct target_ucontext {
    abi_uint tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    abi_uint pad[1];
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

struct target_rt_sigframe {
    abi_uint tramp[9];
    target_siginfo_t info;
    struct target_ucontext uc;
    /* hidden location of upper halves of pa2.0 64-bit gregs */
};

static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
{
    int flags = 0;
    int i;

    /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK).
*/ 6158 6159 if (env->iaoq_f < TARGET_PAGE_SIZE) { 6160 /* In the gateway page, executing a syscall. */ 6161 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */ 6162 __put_user(env->gr[31], &sc->sc_iaoq[0]); 6163 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]); 6164 } else { 6165 __put_user(env->iaoq_f, &sc->sc_iaoq[0]); 6166 __put_user(env->iaoq_b, &sc->sc_iaoq[1]); 6167 } 6168 __put_user(0, &sc->sc_iasq[0]); 6169 __put_user(0, &sc->sc_iasq[1]); 6170 __put_user(flags, &sc->sc_flags); 6171 6172 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]); 6173 for (i = 1; i < 32; ++i) { 6174 __put_user(env->gr[i], &sc->sc_gr[i]); 6175 } 6176 6177 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]); 6178 for (i = 1; i < 32; ++i) { 6179 __put_user(env->fr[i], &sc->sc_fr[i]); 6180 } 6181 6182 __put_user(env->sar, &sc->sc_sar); 6183 } 6184 6185 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc) 6186 { 6187 target_ulong psw; 6188 int i; 6189 6190 __get_user(psw, &sc->sc_gr[0]); 6191 cpu_hppa_put_psw(env, psw); 6192 6193 for (i = 1; i < 32; ++i) { 6194 __get_user(env->gr[i], &sc->sc_gr[i]); 6195 } 6196 for (i = 0; i < 32; ++i) { 6197 __get_user(env->fr[i], &sc->sc_fr[i]); 6198 } 6199 cpu_hppa_loaded_fr0(env); 6200 6201 __get_user(env->iaoq_f, &sc->sc_iaoq[0]); 6202 __get_user(env->iaoq_b, &sc->sc_iaoq[1]); 6203 __get_user(env->sar, &sc->sc_sar); 6204 } 6205 6206 /* No, this doesn't look right, but it's copied straight from the kernel. */ 6207 #define PARISC_RT_SIGFRAME_SIZE32 \ 6208 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64) 6209 6210 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6211 target_siginfo_t *info, 6212 target_sigset_t *set, CPUArchState *env) 6213 { 6214 abi_ulong frame_addr, sp, haddr; 6215 struct target_rt_sigframe *frame; 6216 int i; 6217 6218 sp = env->gr[30]; 6219 if (ka->sa_flags & TARGET_SA_ONSTACK) { 6220 if (sas_ss_flags(sp) == 0) { 6221 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f; 6222 } 6223 } 6224 frame_addr = QEMU_ALIGN_UP(sp, 64); 6225 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32; 6226 6227 trace_user_setup_rt_frame(env, frame_addr); 6228 6229 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6230 goto give_sigsegv; 6231 } 6232 6233 tswap_siginfo(&frame->info, info); 6234 frame->uc.tuc_flags = 0; 6235 frame->uc.tuc_link = 0; 6236 6237 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 6238 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 6239 &frame->uc.tuc_stack.ss_flags); 6240 __put_user(target_sigaltstack_used.ss_size, 6241 &frame->uc.tuc_stack.ss_size); 6242 6243 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 6244 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 6245 } 6246 6247 setup_sigcontext(&frame->uc.tuc_mcontext, env); 6248 6249 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */ 6250 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */ 6251 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */ 6252 __put_user(0x08000240, frame->tramp + 3); /* nop */ 6253 6254 unlock_user_struct(frame, frame_addr, 1); 6255 6256 env->gr[2] = h2g(frame->tramp); 6257 env->gr[30] = sp; 6258 env->gr[26] = sig; 6259 env->gr[25] = h2g(&frame->info); 6260 env->gr[24] = h2g(&frame->uc); 6261 6262 haddr = ka->_sa_handler; 6263 if (haddr & 2) { 6264 /* Function descriptor. 
       */
        target_ulong *fdesc, dest;

        haddr &= -4;
        if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
            goto give_sigsegv;
        }
        __get_user(dest, fdesc);
        __get_user(env->gr[19], fdesc + 1);
        unlock_user_struct(fdesc, haddr, 1);
        haddr = dest;
    }
    env->iaoq_f = haddr;
    env->iaoq_b = haddr + 4;
    return;

give_sigsegv:
    force_sigsegv(sig);
}

long do_rt_sigreturn(CPUArchState *env)
{
    abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
    struct target_rt_sigframe *frame;
    sigset_t set;

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    restore_sigcontext(env, &frame->uc.tuc_mcontext);
    unlock_user_struct(frame, frame_addr, 0);

    if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
                                             uc.tuc_stack),
                       0, env->gr[30]) == -EFAULT) {
        goto badframe;
    }

    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

#else

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_frame: not implemented\n");
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUArchState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}

long do_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

long do_rt_sigreturn(CPUArchState *env)
{
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}

#endif

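/*
 * Everything below is target-independent signal delivery.  Which of the
 * per-target frame builders above is used gets decided in
 * handle_pending_signal(): targets without traditional signals always go
 * through setup_rt_frame(), the rest pick setup_rt_frame() or setup_frame()
 * based on TARGET_SA_SIGINFO.
 *
 * As a rough illustration of the guest-visible contract (a sketch of an
 * ordinary guest program, not part of QEMU), a handler registered with
 * SA_SIGINFO receives the siginfo and ucontext that setup_rt_frame() lays
 * out on the guest stack:
 *
 *     #include <signal.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *
 *     static void handler(int sig, siginfo_t *info, void *ucontext)
 *     {
 *         // 'info' and 'ucontext' point into the rt signal frame
 *         write(STDOUT_FILENO, "caught\n", 7);
 *     }
 *
 *     int main(void)
 *     {
 *         struct sigaction sa;
 *
 *         memset(&sa, 0, sizeof(sa));
 *         sa.sa_sigaction = handler;
 *         sa.sa_flags = SA_SIGINFO;   // request the rt frame layout
 *         sigemptyset(&sa.sa_mask);
 *         sigaction(SIGUSR1, &sa, NULL);
 *         raise(SIGUSR1);             // delivered via setup_rt_frame()
 *         return 0;
 *     }
 *
 * On return from the handler, the trampoline written into the frame's
 * retcode (or, where supported, the guest-provided sa_restorer) issues the
 * guest sigreturn syscall, which ends up in do_rt_sigreturn() above to undo
 * what setup_rt_frame() saved.
 */
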
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job control
           or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
    || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
    || defined(TARGET_NIOS2)
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

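/*
 * The mask handling in handle_pending_signal() mirrors the kernel's delivery
 * rules: while the handler runs, the blocked set becomes the union of the
 * mask that was in force before delivery (or the sigsuspend() mask if the
 * signal interrupted sigsuspend) and sa_mask, plus the delivered signal
 * itself unless SA_NODEFER was requested.  The pre-handler ts->signal_mask
 * is what gets written into the frame (sc_mask/extramask or uc.tuc_sigmask)
 * so the matching do_sigreturn()/do_rt_sigreturn() can restore it.
 *
 * A minimal host-side sketch of that rule, using plain sigset_t rather than
 * the target_sigset_t helpers above (illustrative only, not used by QEMU):
 *
 *     static void handler_entry_mask(sigset_t *out, const sigset_t *current,
 *                                    const sigset_t *sa_mask, int host_sig,
 *                                    int nodefer)
 *     {
 *         sigset_t add = *sa_mask;
 *         if (!nodefer) {
 *             sigaddset(&add, host_sig);   // block the signal being delivered
 *         }
 *         sigorset(out, current, &add);    // union with the pre-handler mask
 *     }
 */
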
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
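
/*
 * For reference, the TARGET_SA_ONSTACK handling in the get_sigframe()
 * implementations above corresponds to the usual guest-side sigaltstack()
 * pattern.  A sketch of such a guest program (illustrative only, not part
 * of QEMU):
 *
 *     #include <signal.h>
 *     #include <stdlib.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *
 *     static void on_usr1(int sig)
 *     {
 *         // runs on the alternate stack registered below
 *         write(STDOUT_FILENO, "handled on alt stack\n", 21);
 *     }
 *
 *     int main(void)
 *     {
 *         stack_t ss;
 *         struct sigaction sa;
 *
 *         ss.ss_sp = malloc(SIGSTKSZ);
 *         ss.ss_size = SIGSTKSZ;
 *         ss.ss_flags = 0;
 *         sigaltstack(&ss, NULL);     // recorded in target_sigaltstack_used
 *
 *         memset(&sa, 0, sizeof(sa));
 *         sa.sa_handler = on_usr1;
 *         sa.sa_flags = SA_ONSTACK;   // ask for the frame on the alt stack
 *         sigemptyset(&sa.sa_mask);
 *         sigaction(SIGUSR1, &sa, NULL);
 *
 *         raise(SIGUSR1);
 *         return 0;
 *     }
 *
 * When such a handler fires, sas_ss_flags(sp) returns 0 (an alternate stack
 * is configured and sp is not already on it), so get_sigframe() switches to
 * ss_sp + ss_size before reserving and aligning the frame; the
 * do_sigaltstack() calls in the rt sigreturn paths then restore the guest's
 * stack_t settings from the saved ucontext.
 */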