1 /* 2 * Emulation of Linux signals 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/bitops.h" 21 #include <sys/ucontext.h> 22 #include <sys/resource.h> 23 24 #include "qemu.h" 25 #include "qemu-common.h" 26 #include "target_signal.h" 27 #include "trace.h" 28 29 static struct target_sigaltstack target_sigaltstack_used = { 30 .ss_sp = 0, 31 .ss_size = 0, 32 .ss_flags = TARGET_SS_DISABLE, 33 }; 34 35 static struct target_sigaction sigact_table[TARGET_NSIG]; 36 37 static void host_signal_handler(int host_signum, siginfo_t *info, 38 void *puc); 39 40 static uint8_t host_to_target_signal_table[_NSIG] = { 41 [SIGHUP] = TARGET_SIGHUP, 42 [SIGINT] = TARGET_SIGINT, 43 [SIGQUIT] = TARGET_SIGQUIT, 44 [SIGILL] = TARGET_SIGILL, 45 [SIGTRAP] = TARGET_SIGTRAP, 46 [SIGABRT] = TARGET_SIGABRT, 47 /* [SIGIOT] = TARGET_SIGIOT,*/ 48 [SIGBUS] = TARGET_SIGBUS, 49 [SIGFPE] = TARGET_SIGFPE, 50 [SIGKILL] = TARGET_SIGKILL, 51 [SIGUSR1] = TARGET_SIGUSR1, 52 [SIGSEGV] = TARGET_SIGSEGV, 53 [SIGUSR2] = TARGET_SIGUSR2, 54 [SIGPIPE] = TARGET_SIGPIPE, 55 [SIGALRM] = TARGET_SIGALRM, 56 [SIGTERM] = TARGET_SIGTERM, 57 #ifdef SIGSTKFLT 58 [SIGSTKFLT] = TARGET_SIGSTKFLT, 59 #endif 60 [SIGCHLD] = TARGET_SIGCHLD, 61 [SIGCONT] = TARGET_SIGCONT, 62 [SIGSTOP] = TARGET_SIGSTOP, 63 [SIGTSTP] = TARGET_SIGTSTP, 64 [SIGTTIN] = TARGET_SIGTTIN, 65 [SIGTTOU] = TARGET_SIGTTOU, 66 [SIGURG] = TARGET_SIGURG, 67 [SIGXCPU] = TARGET_SIGXCPU, 68 [SIGXFSZ] = TARGET_SIGXFSZ, 69 [SIGVTALRM] = TARGET_SIGVTALRM, 70 [SIGPROF] = TARGET_SIGPROF, 71 [SIGWINCH] = TARGET_SIGWINCH, 72 [SIGIO] = TARGET_SIGIO, 73 [SIGPWR] = TARGET_SIGPWR, 74 [SIGSYS] = TARGET_SIGSYS, 75 /* next signals stay the same */ 76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with 77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/ 78 To fix this properly we need to do manual signal delivery multiplexed 79 over a single host signal. */ 80 [__SIGRTMIN] = __SIGRTMAX, 81 [__SIGRTMAX] = __SIGRTMIN, 82 }; 83 static uint8_t target_to_host_signal_table[_NSIG]; 84 85 static inline int on_sig_stack(unsigned long sp) 86 { 87 return (sp - target_sigaltstack_used.ss_sp 88 < target_sigaltstack_used.ss_size); 89 } 90 91 static inline int sas_ss_flags(unsigned long sp) 92 { 93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE 94 : on_sig_stack(sp) ? SS_ONSTACK : 0); 95 } 96 97 int host_to_target_signal(int sig) 98 { 99 if (sig < 0 || sig >= _NSIG) 100 return sig; 101 return host_to_target_signal_table[sig]; 102 } 103 104 int target_to_host_signal(int sig) 105 { 106 if (sig < 0 || sig >= _NSIG) 107 return sig; 108 return target_to_host_signal_table[sig]; 109 } 110 111 static inline void target_sigemptyset(target_sigset_t *set) 112 { 113 memset(set, 0, sizeof(*set)); 114 } 115 116 static inline void target_sigaddset(target_sigset_t *set, int signum) 117 { 118 signum--; 119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 120 set->sig[signum / TARGET_NSIG_BPW] |= mask; 121 } 122 123 static inline int target_sigismember(const target_sigset_t *set, int signum) 124 { 125 signum--; 126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW); 127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0); 128 } 129 130 static void host_to_target_sigset_internal(target_sigset_t *d, 131 const sigset_t *s) 132 { 133 int i; 134 target_sigemptyset(d); 135 for (i = 1; i <= TARGET_NSIG; i++) { 136 if (sigismember(s, i)) { 137 target_sigaddset(d, host_to_target_signal(i)); 138 } 139 } 140 } 141 142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s) 143 { 144 target_sigset_t d1; 145 int i; 146 147 host_to_target_sigset_internal(&d1, s); 148 for(i = 0;i < TARGET_NSIG_WORDS; i++) 149 d->sig[i] = tswapal(d1.sig[i]); 150 } 151 152 static void target_to_host_sigset_internal(sigset_t *d, 153 const target_sigset_t *s) 154 { 155 int i; 156 sigemptyset(d); 157 for (i = 1; i <= TARGET_NSIG; i++) { 158 if (target_sigismember(s, i)) { 159 sigaddset(d, target_to_host_signal(i)); 160 } 161 } 162 } 163 164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s) 165 { 166 target_sigset_t s1; 167 int i; 168 169 for(i = 0;i < TARGET_NSIG_WORDS; i++) 170 s1.sig[i] = tswapal(s->sig[i]); 171 target_to_host_sigset_internal(d, &s1); 172 } 173 174 void host_to_target_old_sigset(abi_ulong *old_sigset, 175 const sigset_t *sigset) 176 { 177 target_sigset_t d; 178 host_to_target_sigset(&d, sigset); 179 *old_sigset = d.sig[0]; 180 } 181 182 void target_to_host_old_sigset(sigset_t *sigset, 183 const abi_ulong *old_sigset) 184 { 185 target_sigset_t d; 186 int i; 187 188 d.sig[0] = *old_sigset; 189 for(i = 1;i < TARGET_NSIG_WORDS; i++) 190 d.sig[i] = 0; 191 target_to_host_sigset(sigset, &d); 192 } 193 194 int block_signals(void) 195 { 196 TaskState *ts = (TaskState *)thread_cpu->opaque; 197 sigset_t set; 198 199 /* It's OK to block everything including SIGSEGV, because we won't 200 * run any further guest code before unblocking signals in 201 * process_pending_signals(). 202 */ 203 sigfillset(&set); 204 sigprocmask(SIG_SETMASK, &set, 0); 205 206 return atomic_xchg(&ts->signal_pending, 1); 207 } 208 209 /* Wrapper for sigprocmask function 210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset 211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if 212 * a signal was already pending and the syscall must be restarted, or 213 * 0 on success. 214 * If set is NULL, this is guaranteed not to fail. 215 */ 216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) 217 { 218 TaskState *ts = (TaskState *)thread_cpu->opaque; 219 220 if (oldset) { 221 *oldset = ts->signal_mask; 222 } 223 224 if (set) { 225 int i; 226 227 if (block_signals()) { 228 return -TARGET_ERESTARTSYS; 229 } 230 231 switch (how) { 232 case SIG_BLOCK: 233 sigorset(&ts->signal_mask, &ts->signal_mask, set); 234 break; 235 case SIG_UNBLOCK: 236 for (i = 1; i <= NSIG; ++i) { 237 if (sigismember(set, i)) { 238 sigdelset(&ts->signal_mask, i); 239 } 240 } 241 break; 242 case SIG_SETMASK: 243 ts->signal_mask = *set; 244 break; 245 default: 246 g_assert_not_reached(); 247 } 248 249 /* Silently ignore attempts to change blocking status of KILL or STOP */ 250 sigdelset(&ts->signal_mask, SIGKILL); 251 sigdelset(&ts->signal_mask, SIGSTOP); 252 } 253 return 0; 254 } 255 256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2) 257 /* Just set the guest's signal mask to the specified value; the 258 * caller is assumed to have called block_signals() already. 259 */ 260 static void set_sigmask(const sigset_t *set) 261 { 262 TaskState *ts = (TaskState *)thread_cpu->opaque; 263 264 ts->signal_mask = *set; 265 } 266 #endif 267 268 /* siginfo conversion */ 269 270 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, 271 const siginfo_t *info) 272 { 273 int sig = host_to_target_signal(info->si_signo); 274 int si_code = info->si_code; 275 int si_type; 276 tinfo->si_signo = sig; 277 tinfo->si_errno = 0; 278 tinfo->si_code = info->si_code; 279 280 /* This memset serves two purposes: 281 * (1) ensure we don't leak random junk to the guest later 282 * (2) placate false positives from gcc about fields 283 * being used uninitialized if it chooses to inline both this 284 * function and tswap_siginfo() into host_to_target_siginfo(). 285 */ 286 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad)); 287 288 /* This is awkward, because we have to use a combination of 289 * the si_code and si_signo to figure out which of the union's 290 * members are valid. (Within the host kernel it is always possible 291 * to tell, but the kernel carefully avoids giving userspace the 292 * high 16 bits of si_code, so we don't have the information to 293 * do this the easy way...) We therefore make our best guess, 294 * bearing in mind that a guest can spoof most of the si_codes 295 * via rt_sigqueueinfo() if it likes. 296 * 297 * Once we have made our guess, we record it in the top 16 bits of 298 * the si_code, so that tswap_siginfo() later can use it. 299 * tswap_siginfo() will strip these top bits out before writing 300 * si_code to the guest (sign-extending the lower bits). 301 */ 302 303 switch (si_code) { 304 case SI_USER: 305 case SI_TKILL: 306 case SI_KERNEL: 307 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel. 308 * These are the only unspoofable si_code values. 309 */ 310 tinfo->_sifields._kill._pid = info->si_pid; 311 tinfo->_sifields._kill._uid = info->si_uid; 312 si_type = QEMU_SI_KILL; 313 break; 314 default: 315 /* Everything else is spoofable. Make best guess based on signal */ 316 switch (sig) { 317 case TARGET_SIGCHLD: 318 tinfo->_sifields._sigchld._pid = info->si_pid; 319 tinfo->_sifields._sigchld._uid = info->si_uid; 320 tinfo->_sifields._sigchld._status 321 = host_to_target_waitstatus(info->si_status); 322 tinfo->_sifields._sigchld._utime = info->si_utime; 323 tinfo->_sifields._sigchld._stime = info->si_stime; 324 si_type = QEMU_SI_CHLD; 325 break; 326 case TARGET_SIGIO: 327 tinfo->_sifields._sigpoll._band = info->si_band; 328 tinfo->_sifields._sigpoll._fd = info->si_fd; 329 si_type = QEMU_SI_POLL; 330 break; 331 default: 332 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */ 333 tinfo->_sifields._rt._pid = info->si_pid; 334 tinfo->_sifields._rt._uid = info->si_uid; 335 /* XXX: potential problem if 64 bit */ 336 tinfo->_sifields._rt._sigval.sival_ptr 337 = (abi_ulong)(unsigned long)info->si_value.sival_ptr; 338 si_type = QEMU_SI_RT; 339 break; 340 } 341 break; 342 } 343 344 tinfo->si_code = deposit32(si_code, 16, 16, si_type); 345 } 346 347 static void tswap_siginfo(target_siginfo_t *tinfo, 348 const target_siginfo_t *info) 349 { 350 int si_type = extract32(info->si_code, 16, 16); 351 int si_code = sextract32(info->si_code, 0, 16); 352 353 __put_user(info->si_signo, &tinfo->si_signo); 354 __put_user(info->si_errno, &tinfo->si_errno); 355 __put_user(si_code, &tinfo->si_code); 356 357 /* We can use our internal marker of which fields in the structure 358 * are valid, rather than duplicating the guesswork of 359 * host_to_target_siginfo_noswap() here. 360 */ 361 switch (si_type) { 362 case QEMU_SI_KILL: 363 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid); 364 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid); 365 break; 366 case QEMU_SI_TIMER: 367 __put_user(info->_sifields._timer._timer1, 368 &tinfo->_sifields._timer._timer1); 369 __put_user(info->_sifields._timer._timer2, 370 &tinfo->_sifields._timer._timer2); 371 break; 372 case QEMU_SI_POLL: 373 __put_user(info->_sifields._sigpoll._band, 374 &tinfo->_sifields._sigpoll._band); 375 __put_user(info->_sifields._sigpoll._fd, 376 &tinfo->_sifields._sigpoll._fd); 377 break; 378 case QEMU_SI_FAULT: 379 __put_user(info->_sifields._sigfault._addr, 380 &tinfo->_sifields._sigfault._addr); 381 break; 382 case QEMU_SI_CHLD: 383 __put_user(info->_sifields._sigchld._pid, 384 &tinfo->_sifields._sigchld._pid); 385 __put_user(info->_sifields._sigchld._uid, 386 &tinfo->_sifields._sigchld._uid); 387 __put_user(info->_sifields._sigchld._status, 388 &tinfo->_sifields._sigchld._status); 389 __put_user(info->_sifields._sigchld._utime, 390 &tinfo->_sifields._sigchld._utime); 391 __put_user(info->_sifields._sigchld._stime, 392 &tinfo->_sifields._sigchld._stime); 393 break; 394 case QEMU_SI_RT: 395 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid); 396 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid); 397 __put_user(info->_sifields._rt._sigval.sival_ptr, 398 &tinfo->_sifields._rt._sigval.sival_ptr); 399 break; 400 default: 401 g_assert_not_reached(); 402 } 403 } 404 405 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) 406 { 407 target_siginfo_t tgt_tmp; 408 host_to_target_siginfo_noswap(&tgt_tmp, info); 409 tswap_siginfo(tinfo, &tgt_tmp); 410 } 411 412 /* XXX: we support only POSIX RT signals are used. */ 413 /* XXX: find a solution for 64 bit (additional malloced data is needed) */ 414 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) 415 { 416 /* This conversion is used only for the rt_sigqueueinfo syscall, 417 * and so we know that the _rt fields are the valid ones. 418 */ 419 abi_ulong sival_ptr; 420 421 __get_user(info->si_signo, &tinfo->si_signo); 422 __get_user(info->si_errno, &tinfo->si_errno); 423 __get_user(info->si_code, &tinfo->si_code); 424 __get_user(info->si_pid, &tinfo->_sifields._rt._pid); 425 __get_user(info->si_uid, &tinfo->_sifields._rt._uid); 426 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr); 427 info->si_value.sival_ptr = (void *)(long)sival_ptr; 428 } 429 430 static int fatal_signal (int sig) 431 { 432 switch (sig) { 433 case TARGET_SIGCHLD: 434 case TARGET_SIGURG: 435 case TARGET_SIGWINCH: 436 /* Ignored by default. */ 437 return 0; 438 case TARGET_SIGCONT: 439 case TARGET_SIGSTOP: 440 case TARGET_SIGTSTP: 441 case TARGET_SIGTTIN: 442 case TARGET_SIGTTOU: 443 /* Job control signals. */ 444 return 0; 445 default: 446 return 1; 447 } 448 } 449 450 /* returns 1 if given signal should dump core if not handled */ 451 static int core_dump_signal(int sig) 452 { 453 switch (sig) { 454 case TARGET_SIGABRT: 455 case TARGET_SIGFPE: 456 case TARGET_SIGILL: 457 case TARGET_SIGQUIT: 458 case TARGET_SIGSEGV: 459 case TARGET_SIGTRAP: 460 case TARGET_SIGBUS: 461 return (1); 462 default: 463 return (0); 464 } 465 } 466 467 void signal_init(void) 468 { 469 TaskState *ts = (TaskState *)thread_cpu->opaque; 470 struct sigaction act; 471 struct sigaction oact; 472 int i, j; 473 int host_sig; 474 475 /* generate signal conversion tables */ 476 for(i = 1; i < _NSIG; i++) { 477 if (host_to_target_signal_table[i] == 0) 478 host_to_target_signal_table[i] = i; 479 } 480 for(i = 1; i < _NSIG; i++) { 481 j = host_to_target_signal_table[i]; 482 target_to_host_signal_table[j] = i; 483 } 484 485 /* Set the signal mask from the host mask. */ 486 sigprocmask(0, 0, &ts->signal_mask); 487 488 /* set all host signal handlers. ALL signals are blocked during 489 the handlers to serialize them. */ 490 memset(sigact_table, 0, sizeof(sigact_table)); 491 492 sigfillset(&act.sa_mask); 493 act.sa_flags = SA_SIGINFO; 494 act.sa_sigaction = host_signal_handler; 495 for(i = 1; i <= TARGET_NSIG; i++) { 496 host_sig = target_to_host_signal(i); 497 sigaction(host_sig, NULL, &oact); 498 if (oact.sa_sigaction == (void *)SIG_IGN) { 499 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN; 500 } else if (oact.sa_sigaction == (void *)SIG_DFL) { 501 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL; 502 } 503 /* If there's already a handler installed then something has 504 gone horribly wrong, so don't even try to handle that case. */ 505 /* Install some handlers for our own use. We need at least 506 SIGSEGV and SIGBUS, to detect exceptions. We can not just 507 trap all signals because it affects syscall interrupt 508 behavior. But do trap all default-fatal signals. */ 509 if (fatal_signal (i)) 510 sigaction(host_sig, &act, NULL); 511 } 512 } 513 514 /* Force a synchronously taken signal. The kernel force_sig() function 515 * also forces the signal to "not blocked, not ignored", but for QEMU 516 * that work is done in process_pending_signals(). 517 */ 518 static void force_sig(int sig) 519 { 520 CPUState *cpu = thread_cpu; 521 CPUArchState *env = cpu->env_ptr; 522 target_siginfo_t info; 523 524 info.si_signo = sig; 525 info.si_errno = 0; 526 info.si_code = TARGET_SI_KERNEL; 527 info._sifields._kill._pid = 0; 528 info._sifields._kill._uid = 0; 529 queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); 530 } 531 532 /* Force a SIGSEGV if we couldn't write to memory trying to set 533 * up the signal frame. oldsig is the signal we were trying to handle 534 * at the point of failure. 535 */ 536 #if !defined(TARGET_RISCV) 537 static void force_sigsegv(int oldsig) 538 { 539 if (oldsig == SIGSEGV) { 540 /* Make sure we don't try to deliver the signal again; this will 541 * end up with handle_pending_signal() calling dump_core_and_abort(). 542 */ 543 sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL; 544 } 545 force_sig(TARGET_SIGSEGV); 546 } 547 548 #endif 549 550 /* abort execution with signal */ 551 static void QEMU_NORETURN dump_core_and_abort(int target_sig) 552 { 553 CPUState *cpu = thread_cpu; 554 CPUArchState *env = cpu->env_ptr; 555 TaskState *ts = (TaskState *)cpu->opaque; 556 int host_sig, core_dumped = 0; 557 struct sigaction act; 558 559 host_sig = target_to_host_signal(target_sig); 560 trace_user_force_sig(env, target_sig, host_sig); 561 gdb_signalled(env, target_sig); 562 563 /* dump core if supported by target binary format */ 564 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { 565 stop_all_tasks(); 566 core_dumped = 567 ((*ts->bprm->core_dump)(target_sig, env) == 0); 568 } 569 if (core_dumped) { 570 /* we already dumped the core of target process, we don't want 571 * a coredump of qemu itself */ 572 struct rlimit nodump; 573 getrlimit(RLIMIT_CORE, &nodump); 574 nodump.rlim_cur=0; 575 setrlimit(RLIMIT_CORE, &nodump); 576 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n", 577 target_sig, strsignal(host_sig), "core dumped" ); 578 } 579 580 /* The proper exit code for dying from an uncaught signal is 581 * -<signal>. The kernel doesn't allow exit() or _exit() to pass 582 * a negative value. To get the proper exit code we need to 583 * actually die from an uncaught signal. Here the default signal 584 * handler is installed, we send ourself a signal and we wait for 585 * it to arrive. */ 586 sigfillset(&act.sa_mask); 587 act.sa_handler = SIG_DFL; 588 act.sa_flags = 0; 589 sigaction(host_sig, &act, NULL); 590 591 /* For some reason raise(host_sig) doesn't send the signal when 592 * statically linked on x86-64. */ 593 kill(getpid(), host_sig); 594 595 /* Make sure the signal isn't masked (just reuse the mask inside 596 of act) */ 597 sigdelset(&act.sa_mask, host_sig); 598 sigsuspend(&act.sa_mask); 599 600 /* unreachable */ 601 abort(); 602 } 603 604 /* queue a signal so that it will be send to the virtual CPU as soon 605 as possible */ 606 int queue_signal(CPUArchState *env, int sig, int si_type, 607 target_siginfo_t *info) 608 { 609 CPUState *cpu = ENV_GET_CPU(env); 610 TaskState *ts = cpu->opaque; 611 612 trace_user_queue_signal(env, sig); 613 614 info->si_code = deposit32(info->si_code, 16, 16, si_type); 615 616 ts->sync_signal.info = *info; 617 ts->sync_signal.pending = sig; 618 /* signal that a new signal is pending */ 619 atomic_set(&ts->signal_pending, 1); 620 return 1; /* indicates that the signal was queued */ 621 } 622 623 #ifndef HAVE_SAFE_SYSCALL 624 static inline void rewind_if_in_safe_syscall(void *puc) 625 { 626 /* Default version: never rewind */ 627 } 628 #endif 629 630 static void host_signal_handler(int host_signum, siginfo_t *info, 631 void *puc) 632 { 633 CPUArchState *env = thread_cpu->env_ptr; 634 CPUState *cpu = ENV_GET_CPU(env); 635 TaskState *ts = cpu->opaque; 636 637 int sig; 638 target_siginfo_t tinfo; 639 ucontext_t *uc = puc; 640 struct emulated_sigtable *k; 641 642 /* the CPU emulator uses some host signals to detect exceptions, 643 we forward to it some signals */ 644 if ((host_signum == SIGSEGV || host_signum == SIGBUS) 645 && info->si_code > 0) { 646 if (cpu_signal_handler(host_signum, info, puc)) 647 return; 648 } 649 650 /* get target signal number */ 651 sig = host_to_target_signal(host_signum); 652 if (sig < 1 || sig > TARGET_NSIG) 653 return; 654 trace_user_host_signal(env, host_signum, sig); 655 656 rewind_if_in_safe_syscall(puc); 657 658 host_to_target_siginfo_noswap(&tinfo, info); 659 k = &ts->sigtab[sig - 1]; 660 k->info = tinfo; 661 k->pending = sig; 662 ts->signal_pending = 1; 663 664 /* Block host signals until target signal handler entered. We 665 * can't block SIGSEGV or SIGBUS while we're executing guest 666 * code in case the guest code provokes one in the window between 667 * now and it getting out to the main loop. Signals will be 668 * unblocked again in process_pending_signals(). 669 * 670 * WARNING: we cannot use sigfillset() here because the uc_sigmask 671 * field is a kernel sigset_t, which is much smaller than the 672 * libc sigset_t which sigfillset() operates on. Using sigfillset() 673 * would write 0xff bytes off the end of the structure and trash 674 * data on the struct. 675 * We can't use sizeof(uc->uc_sigmask) either, because the libc 676 * headers define the struct field with the wrong (too large) type. 677 */ 678 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE); 679 sigdelset(&uc->uc_sigmask, SIGSEGV); 680 sigdelset(&uc->uc_sigmask, SIGBUS); 681 682 /* interrupt the virtual CPU as soon as possible */ 683 cpu_exit(thread_cpu); 684 } 685 686 /* do_sigaltstack() returns target values and errnos. */ 687 /* compare linux/kernel/signal.c:do_sigaltstack() */ 688 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) 689 { 690 int ret; 691 struct target_sigaltstack oss; 692 693 /* XXX: test errors */ 694 if(uoss_addr) 695 { 696 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); 697 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); 698 __put_user(sas_ss_flags(sp), &oss.ss_flags); 699 } 700 701 if(uss_addr) 702 { 703 struct target_sigaltstack *uss; 704 struct target_sigaltstack ss; 705 size_t minstacksize = TARGET_MINSIGSTKSZ; 706 707 #if defined(TARGET_PPC64) 708 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */ 709 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 710 if (get_ppc64_abi(image) > 1) { 711 minstacksize = 4096; 712 } 713 #endif 714 715 ret = -TARGET_EFAULT; 716 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) { 717 goto out; 718 } 719 __get_user(ss.ss_sp, &uss->ss_sp); 720 __get_user(ss.ss_size, &uss->ss_size); 721 __get_user(ss.ss_flags, &uss->ss_flags); 722 unlock_user_struct(uss, uss_addr, 0); 723 724 ret = -TARGET_EPERM; 725 if (on_sig_stack(sp)) 726 goto out; 727 728 ret = -TARGET_EINVAL; 729 if (ss.ss_flags != TARGET_SS_DISABLE 730 && ss.ss_flags != TARGET_SS_ONSTACK 731 && ss.ss_flags != 0) 732 goto out; 733 734 if (ss.ss_flags == TARGET_SS_DISABLE) { 735 ss.ss_size = 0; 736 ss.ss_sp = 0; 737 } else { 738 ret = -TARGET_ENOMEM; 739 if (ss.ss_size < minstacksize) { 740 goto out; 741 } 742 } 743 744 target_sigaltstack_used.ss_sp = ss.ss_sp; 745 target_sigaltstack_used.ss_size = ss.ss_size; 746 } 747 748 if (uoss_addr) { 749 ret = -TARGET_EFAULT; 750 if (copy_to_user(uoss_addr, &oss, sizeof(oss))) 751 goto out; 752 } 753 754 ret = 0; 755 out: 756 return ret; 757 } 758 759 /* do_sigaction() return target values and host errnos */ 760 int do_sigaction(int sig, const struct target_sigaction *act, 761 struct target_sigaction *oact) 762 { 763 struct target_sigaction *k; 764 struct sigaction act1; 765 int host_sig; 766 int ret = 0; 767 768 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) { 769 return -TARGET_EINVAL; 770 } 771 772 if (block_signals()) { 773 return -TARGET_ERESTARTSYS; 774 } 775 776 k = &sigact_table[sig - 1]; 777 if (oact) { 778 __put_user(k->_sa_handler, &oact->_sa_handler); 779 __put_user(k->sa_flags, &oact->sa_flags); 780 #ifdef TARGET_ARCH_HAS_SA_RESTORER 781 __put_user(k->sa_restorer, &oact->sa_restorer); 782 #endif 783 /* Not swapped. */ 784 oact->sa_mask = k->sa_mask; 785 } 786 if (act) { 787 /* FIXME: This is not threadsafe. */ 788 __get_user(k->_sa_handler, &act->_sa_handler); 789 __get_user(k->sa_flags, &act->sa_flags); 790 #ifdef TARGET_ARCH_HAS_SA_RESTORER 791 __get_user(k->sa_restorer, &act->sa_restorer); 792 #endif 793 /* To be swapped in target_to_host_sigset. */ 794 k->sa_mask = act->sa_mask; 795 796 /* we update the host linux signal state */ 797 host_sig = target_to_host_signal(sig); 798 if (host_sig != SIGSEGV && host_sig != SIGBUS) { 799 sigfillset(&act1.sa_mask); 800 act1.sa_flags = SA_SIGINFO; 801 if (k->sa_flags & TARGET_SA_RESTART) 802 act1.sa_flags |= SA_RESTART; 803 /* NOTE: it is important to update the host kernel signal 804 ignore state to avoid getting unexpected interrupted 805 syscalls */ 806 if (k->_sa_handler == TARGET_SIG_IGN) { 807 act1.sa_sigaction = (void *)SIG_IGN; 808 } else if (k->_sa_handler == TARGET_SIG_DFL) { 809 if (fatal_signal (sig)) 810 act1.sa_sigaction = host_signal_handler; 811 else 812 act1.sa_sigaction = (void *)SIG_DFL; 813 } else { 814 act1.sa_sigaction = host_signal_handler; 815 } 816 ret = sigaction(host_sig, &act1, NULL); 817 } 818 } 819 return ret; 820 } 821 822 #if defined(TARGET_I386) 823 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */ 824 825 struct target_fpreg { 826 uint16_t significand[4]; 827 uint16_t exponent; 828 }; 829 830 struct target_fpxreg { 831 uint16_t significand[4]; 832 uint16_t exponent; 833 uint16_t padding[3]; 834 }; 835 836 struct target_xmmreg { 837 uint32_t element[4]; 838 }; 839 840 struct target_fpstate_32 { 841 /* Regular FPU environment */ 842 uint32_t cw; 843 uint32_t sw; 844 uint32_t tag; 845 uint32_t ipoff; 846 uint32_t cssel; 847 uint32_t dataoff; 848 uint32_t datasel; 849 struct target_fpreg st[8]; 850 uint16_t status; 851 uint16_t magic; /* 0xffff = regular FPU data only */ 852 853 /* FXSR FPU environment */ 854 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */ 855 uint32_t mxcsr; 856 uint32_t reserved; 857 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */ 858 struct target_xmmreg xmm[8]; 859 uint32_t padding[56]; 860 }; 861 862 struct target_fpstate_64 { 863 /* FXSAVE format */ 864 uint16_t cw; 865 uint16_t sw; 866 uint16_t twd; 867 uint16_t fop; 868 uint64_t rip; 869 uint64_t rdp; 870 uint32_t mxcsr; 871 uint32_t mxcsr_mask; 872 uint32_t st_space[32]; 873 uint32_t xmm_space[64]; 874 uint32_t reserved[24]; 875 }; 876 877 #ifndef TARGET_X86_64 878 # define target_fpstate target_fpstate_32 879 #else 880 # define target_fpstate target_fpstate_64 881 #endif 882 883 struct target_sigcontext_32 { 884 uint16_t gs, __gsh; 885 uint16_t fs, __fsh; 886 uint16_t es, __esh; 887 uint16_t ds, __dsh; 888 uint32_t edi; 889 uint32_t esi; 890 uint32_t ebp; 891 uint32_t esp; 892 uint32_t ebx; 893 uint32_t edx; 894 uint32_t ecx; 895 uint32_t eax; 896 uint32_t trapno; 897 uint32_t err; 898 uint32_t eip; 899 uint16_t cs, __csh; 900 uint32_t eflags; 901 uint32_t esp_at_signal; 902 uint16_t ss, __ssh; 903 uint32_t fpstate; /* pointer */ 904 uint32_t oldmask; 905 uint32_t cr2; 906 }; 907 908 struct target_sigcontext_64 { 909 uint64_t r8; 910 uint64_t r9; 911 uint64_t r10; 912 uint64_t r11; 913 uint64_t r12; 914 uint64_t r13; 915 uint64_t r14; 916 uint64_t r15; 917 918 uint64_t rdi; 919 uint64_t rsi; 920 uint64_t rbp; 921 uint64_t rbx; 922 uint64_t rdx; 923 uint64_t rax; 924 uint64_t rcx; 925 uint64_t rsp; 926 uint64_t rip; 927 928 uint64_t eflags; 929 930 uint16_t cs; 931 uint16_t gs; 932 uint16_t fs; 933 uint16_t ss; 934 935 uint64_t err; 936 uint64_t trapno; 937 uint64_t oldmask; 938 uint64_t cr2; 939 940 uint64_t fpstate; /* pointer */ 941 uint64_t padding[8]; 942 }; 943 944 #ifndef TARGET_X86_64 945 # define target_sigcontext target_sigcontext_32 946 #else 947 # define target_sigcontext target_sigcontext_64 948 #endif 949 950 /* see Linux/include/uapi/asm-generic/ucontext.h */ 951 struct target_ucontext { 952 abi_ulong tuc_flags; 953 abi_ulong tuc_link; 954 target_stack_t tuc_stack; 955 struct target_sigcontext tuc_mcontext; 956 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 957 }; 958 959 #ifndef TARGET_X86_64 960 struct sigframe { 961 abi_ulong pretcode; 962 int sig; 963 struct target_sigcontext sc; 964 struct target_fpstate fpstate; 965 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 966 char retcode[8]; 967 }; 968 969 struct rt_sigframe { 970 abi_ulong pretcode; 971 int sig; 972 abi_ulong pinfo; 973 abi_ulong puc; 974 struct target_siginfo info; 975 struct target_ucontext uc; 976 struct target_fpstate fpstate; 977 char retcode[8]; 978 }; 979 980 #else 981 982 struct rt_sigframe { 983 abi_ulong pretcode; 984 struct target_ucontext uc; 985 struct target_siginfo info; 986 struct target_fpstate fpstate; 987 }; 988 989 #endif 990 991 /* 992 * Set up a signal frame. 993 */ 994 995 /* XXX: save x87 state */ 996 static void setup_sigcontext(struct target_sigcontext *sc, 997 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 998 abi_ulong fpstate_addr) 999 { 1000 CPUState *cs = CPU(x86_env_get_cpu(env)); 1001 #ifndef TARGET_X86_64 1002 uint16_t magic; 1003 1004 /* already locked in setup_frame() */ 1005 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs); 1006 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs); 1007 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es); 1008 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds); 1009 __put_user(env->regs[R_EDI], &sc->edi); 1010 __put_user(env->regs[R_ESI], &sc->esi); 1011 __put_user(env->regs[R_EBP], &sc->ebp); 1012 __put_user(env->regs[R_ESP], &sc->esp); 1013 __put_user(env->regs[R_EBX], &sc->ebx); 1014 __put_user(env->regs[R_EDX], &sc->edx); 1015 __put_user(env->regs[R_ECX], &sc->ecx); 1016 __put_user(env->regs[R_EAX], &sc->eax); 1017 __put_user(cs->exception_index, &sc->trapno); 1018 __put_user(env->error_code, &sc->err); 1019 __put_user(env->eip, &sc->eip); 1020 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs); 1021 __put_user(env->eflags, &sc->eflags); 1022 __put_user(env->regs[R_ESP], &sc->esp_at_signal); 1023 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss); 1024 1025 cpu_x86_fsave(env, fpstate_addr, 1); 1026 fpstate->status = fpstate->sw; 1027 magic = 0xffff; 1028 __put_user(magic, &fpstate->magic); 1029 __put_user(fpstate_addr, &sc->fpstate); 1030 1031 /* non-iBCS2 extensions.. */ 1032 __put_user(mask, &sc->oldmask); 1033 __put_user(env->cr[2], &sc->cr2); 1034 #else 1035 __put_user(env->regs[R_EDI], &sc->rdi); 1036 __put_user(env->regs[R_ESI], &sc->rsi); 1037 __put_user(env->regs[R_EBP], &sc->rbp); 1038 __put_user(env->regs[R_ESP], &sc->rsp); 1039 __put_user(env->regs[R_EBX], &sc->rbx); 1040 __put_user(env->regs[R_EDX], &sc->rdx); 1041 __put_user(env->regs[R_ECX], &sc->rcx); 1042 __put_user(env->regs[R_EAX], &sc->rax); 1043 1044 __put_user(env->regs[8], &sc->r8); 1045 __put_user(env->regs[9], &sc->r9); 1046 __put_user(env->regs[10], &sc->r10); 1047 __put_user(env->regs[11], &sc->r11); 1048 __put_user(env->regs[12], &sc->r12); 1049 __put_user(env->regs[13], &sc->r13); 1050 __put_user(env->regs[14], &sc->r14); 1051 __put_user(env->regs[15], &sc->r15); 1052 1053 __put_user(cs->exception_index, &sc->trapno); 1054 __put_user(env->error_code, &sc->err); 1055 __put_user(env->eip, &sc->rip); 1056 1057 __put_user(env->eflags, &sc->eflags); 1058 __put_user(env->segs[R_CS].selector, &sc->cs); 1059 __put_user((uint16_t)0, &sc->gs); 1060 __put_user((uint16_t)0, &sc->fs); 1061 __put_user(env->segs[R_SS].selector, &sc->ss); 1062 1063 __put_user(mask, &sc->oldmask); 1064 __put_user(env->cr[2], &sc->cr2); 1065 1066 /* fpstate_addr must be 16 byte aligned for fxsave */ 1067 assert(!(fpstate_addr & 0xf)); 1068 1069 cpu_x86_fxsave(env, fpstate_addr); 1070 __put_user(fpstate_addr, &sc->fpstate); 1071 #endif 1072 } 1073 1074 /* 1075 * Determine which stack to use.. 1076 */ 1077 1078 static inline abi_ulong 1079 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) 1080 { 1081 unsigned long esp; 1082 1083 /* Default to using normal stack */ 1084 esp = env->regs[R_ESP]; 1085 #ifdef TARGET_X86_64 1086 esp -= 128; /* this is the redzone */ 1087 #endif 1088 1089 /* This is the X/Open sanctioned signal stack switching. */ 1090 if (ka->sa_flags & TARGET_SA_ONSTACK) { 1091 if (sas_ss_flags(esp) == 0) { 1092 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1093 } 1094 } else { 1095 #ifndef TARGET_X86_64 1096 /* This is the legacy signal stack switching. */ 1097 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS && 1098 !(ka->sa_flags & TARGET_SA_RESTORER) && 1099 ka->sa_restorer) { 1100 esp = (unsigned long) ka->sa_restorer; 1101 } 1102 #endif 1103 } 1104 1105 #ifndef TARGET_X86_64 1106 return (esp - frame_size) & -8ul; 1107 #else 1108 return ((esp - frame_size) & (~15ul)) - 8; 1109 #endif 1110 } 1111 1112 #ifndef TARGET_X86_64 1113 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ 1114 static void setup_frame(int sig, struct target_sigaction *ka, 1115 target_sigset_t *set, CPUX86State *env) 1116 { 1117 abi_ulong frame_addr; 1118 struct sigframe *frame; 1119 int i; 1120 1121 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1122 trace_user_setup_frame(env, frame_addr); 1123 1124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1125 goto give_sigsegv; 1126 1127 __put_user(sig, &frame->sig); 1128 1129 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0], 1130 frame_addr + offsetof(struct sigframe, fpstate)); 1131 1132 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1133 __put_user(set->sig[i], &frame->extramask[i - 1]); 1134 } 1135 1136 /* Set up to return from userspace. If provided, use a stub 1137 already in userspace. */ 1138 if (ka->sa_flags & TARGET_SA_RESTORER) { 1139 __put_user(ka->sa_restorer, &frame->pretcode); 1140 } else { 1141 uint16_t val16; 1142 abi_ulong retcode_addr; 1143 retcode_addr = frame_addr + offsetof(struct sigframe, retcode); 1144 __put_user(retcode_addr, &frame->pretcode); 1145 /* This is popl %eax ; movl $,%eax ; int $0x80 */ 1146 val16 = 0xb858; 1147 __put_user(val16, (uint16_t *)(frame->retcode+0)); 1148 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); 1149 val16 = 0x80cd; 1150 __put_user(val16, (uint16_t *)(frame->retcode+6)); 1151 } 1152 1153 /* Set up registers for signal handler */ 1154 env->regs[R_ESP] = frame_addr; 1155 env->eip = ka->_sa_handler; 1156 1157 cpu_x86_load_seg(env, R_DS, __USER_DS); 1158 cpu_x86_load_seg(env, R_ES, __USER_DS); 1159 cpu_x86_load_seg(env, R_SS, __USER_DS); 1160 cpu_x86_load_seg(env, R_CS, __USER_CS); 1161 env->eflags &= ~TF_MASK; 1162 1163 unlock_user_struct(frame, frame_addr, 1); 1164 1165 return; 1166 1167 give_sigsegv: 1168 force_sigsegv(sig); 1169 } 1170 #endif 1171 1172 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */ 1173 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1174 target_siginfo_t *info, 1175 target_sigset_t *set, CPUX86State *env) 1176 { 1177 abi_ulong frame_addr; 1178 #ifndef TARGET_X86_64 1179 abi_ulong addr; 1180 #endif 1181 struct rt_sigframe *frame; 1182 int i; 1183 1184 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 1185 trace_user_setup_rt_frame(env, frame_addr); 1186 1187 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 1188 goto give_sigsegv; 1189 1190 /* These fields are only in rt_sigframe on 32 bit */ 1191 #ifndef TARGET_X86_64 1192 __put_user(sig, &frame->sig); 1193 addr = frame_addr + offsetof(struct rt_sigframe, info); 1194 __put_user(addr, &frame->pinfo); 1195 addr = frame_addr + offsetof(struct rt_sigframe, uc); 1196 __put_user(addr, &frame->puc); 1197 #endif 1198 if (ka->sa_flags & TARGET_SA_SIGINFO) { 1199 tswap_siginfo(&frame->info, info); 1200 } 1201 1202 /* Create the ucontext. */ 1203 __put_user(0, &frame->uc.tuc_flags); 1204 __put_user(0, &frame->uc.tuc_link); 1205 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 1206 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 1207 &frame->uc.tuc_stack.ss_flags); 1208 __put_user(target_sigaltstack_used.ss_size, 1209 &frame->uc.tuc_stack.ss_size); 1210 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env, 1211 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate)); 1212 1213 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 1214 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 1215 } 1216 1217 /* Set up to return from userspace. If provided, use a stub 1218 already in userspace. */ 1219 #ifndef TARGET_X86_64 1220 if (ka->sa_flags & TARGET_SA_RESTORER) { 1221 __put_user(ka->sa_restorer, &frame->pretcode); 1222 } else { 1223 uint16_t val16; 1224 addr = frame_addr + offsetof(struct rt_sigframe, retcode); 1225 __put_user(addr, &frame->pretcode); 1226 /* This is movl $,%eax ; int $0x80 */ 1227 __put_user(0xb8, (char *)(frame->retcode+0)); 1228 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); 1229 val16 = 0x80cd; 1230 __put_user(val16, (uint16_t *)(frame->retcode+5)); 1231 } 1232 #else 1233 /* XXX: Would be slightly better to return -EFAULT here if test fails 1234 assert(ka->sa_flags & TARGET_SA_RESTORER); */ 1235 __put_user(ka->sa_restorer, &frame->pretcode); 1236 #endif 1237 1238 /* Set up registers for signal handler */ 1239 env->regs[R_ESP] = frame_addr; 1240 env->eip = ka->_sa_handler; 1241 1242 #ifndef TARGET_X86_64 1243 env->regs[R_EAX] = sig; 1244 env->regs[R_EDX] = (unsigned long)&frame->info; 1245 env->regs[R_ECX] = (unsigned long)&frame->uc; 1246 #else 1247 env->regs[R_EAX] = 0; 1248 env->regs[R_EDI] = sig; 1249 env->regs[R_ESI] = (unsigned long)&frame->info; 1250 env->regs[R_EDX] = (unsigned long)&frame->uc; 1251 #endif 1252 1253 cpu_x86_load_seg(env, R_DS, __USER_DS); 1254 cpu_x86_load_seg(env, R_ES, __USER_DS); 1255 cpu_x86_load_seg(env, R_CS, __USER_CS); 1256 cpu_x86_load_seg(env, R_SS, __USER_DS); 1257 env->eflags &= ~TF_MASK; 1258 1259 unlock_user_struct(frame, frame_addr, 1); 1260 1261 return; 1262 1263 give_sigsegv: 1264 force_sigsegv(sig); 1265 } 1266 1267 static int 1268 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc) 1269 { 1270 unsigned int err = 0; 1271 abi_ulong fpstate_addr; 1272 unsigned int tmpflags; 1273 1274 #ifndef TARGET_X86_64 1275 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs)); 1276 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs)); 1277 cpu_x86_load_seg(env, R_ES, tswap16(sc->es)); 1278 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds)); 1279 1280 env->regs[R_EDI] = tswapl(sc->edi); 1281 env->regs[R_ESI] = tswapl(sc->esi); 1282 env->regs[R_EBP] = tswapl(sc->ebp); 1283 env->regs[R_ESP] = tswapl(sc->esp); 1284 env->regs[R_EBX] = tswapl(sc->ebx); 1285 env->regs[R_EDX] = tswapl(sc->edx); 1286 env->regs[R_ECX] = tswapl(sc->ecx); 1287 env->regs[R_EAX] = tswapl(sc->eax); 1288 1289 env->eip = tswapl(sc->eip); 1290 #else 1291 env->regs[8] = tswapl(sc->r8); 1292 env->regs[9] = tswapl(sc->r9); 1293 env->regs[10] = tswapl(sc->r10); 1294 env->regs[11] = tswapl(sc->r11); 1295 env->regs[12] = tswapl(sc->r12); 1296 env->regs[13] = tswapl(sc->r13); 1297 env->regs[14] = tswapl(sc->r14); 1298 env->regs[15] = tswapl(sc->r15); 1299 1300 env->regs[R_EDI] = tswapl(sc->rdi); 1301 env->regs[R_ESI] = tswapl(sc->rsi); 1302 env->regs[R_EBP] = tswapl(sc->rbp); 1303 env->regs[R_EBX] = tswapl(sc->rbx); 1304 env->regs[R_EDX] = tswapl(sc->rdx); 1305 env->regs[R_EAX] = tswapl(sc->rax); 1306 env->regs[R_ECX] = tswapl(sc->rcx); 1307 env->regs[R_ESP] = tswapl(sc->rsp); 1308 1309 env->eip = tswapl(sc->rip); 1310 #endif 1311 1312 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3); 1313 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3); 1314 1315 tmpflags = tswapl(sc->eflags); 1316 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5); 1317 // regs->orig_eax = -1; /* disable syscall checks */ 1318 1319 fpstate_addr = tswapl(sc->fpstate); 1320 if (fpstate_addr != 0) { 1321 if (!access_ok(VERIFY_READ, fpstate_addr, 1322 sizeof(struct target_fpstate))) 1323 goto badframe; 1324 #ifndef TARGET_X86_64 1325 cpu_x86_frstor(env, fpstate_addr, 1); 1326 #else 1327 cpu_x86_fxrstor(env, fpstate_addr); 1328 #endif 1329 } 1330 1331 return err; 1332 badframe: 1333 return 1; 1334 } 1335 1336 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */ 1337 #ifndef TARGET_X86_64 1338 long do_sigreturn(CPUX86State *env) 1339 { 1340 struct sigframe *frame; 1341 abi_ulong frame_addr = env->regs[R_ESP] - 8; 1342 target_sigset_t target_set; 1343 sigset_t set; 1344 int i; 1345 1346 trace_user_do_sigreturn(env, frame_addr); 1347 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1348 goto badframe; 1349 /* set blocked signals */ 1350 __get_user(target_set.sig[0], &frame->sc.oldmask); 1351 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 1352 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 1353 } 1354 1355 target_to_host_sigset_internal(&set, &target_set); 1356 set_sigmask(&set); 1357 1358 /* restore registers */ 1359 if (restore_sigcontext(env, &frame->sc)) 1360 goto badframe; 1361 unlock_user_struct(frame, frame_addr, 0); 1362 return -TARGET_QEMU_ESIGRETURN; 1363 1364 badframe: 1365 unlock_user_struct(frame, frame_addr, 0); 1366 force_sig(TARGET_SIGSEGV); 1367 return -TARGET_QEMU_ESIGRETURN; 1368 } 1369 #endif 1370 1371 long do_rt_sigreturn(CPUX86State *env) 1372 { 1373 abi_ulong frame_addr; 1374 struct rt_sigframe *frame; 1375 sigset_t set; 1376 1377 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong); 1378 trace_user_do_rt_sigreturn(env, frame_addr); 1379 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 1380 goto badframe; 1381 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 1382 set_sigmask(&set); 1383 1384 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 1385 goto badframe; 1386 } 1387 1388 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, 1389 get_sp_from_cpustate(env)) == -EFAULT) { 1390 goto badframe; 1391 } 1392 1393 unlock_user_struct(frame, frame_addr, 0); 1394 return -TARGET_QEMU_ESIGRETURN; 1395 1396 badframe: 1397 unlock_user_struct(frame, frame_addr, 0); 1398 force_sig(TARGET_SIGSEGV); 1399 return -TARGET_QEMU_ESIGRETURN; 1400 } 1401 1402 #elif defined(TARGET_AARCH64) 1403 1404 struct target_sigcontext { 1405 uint64_t fault_address; 1406 /* AArch64 registers */ 1407 uint64_t regs[31]; 1408 uint64_t sp; 1409 uint64_t pc; 1410 uint64_t pstate; 1411 /* 4K reserved for FP/SIMD state and future expansion */ 1412 char __reserved[4096] __attribute__((__aligned__(16))); 1413 }; 1414 1415 struct target_ucontext { 1416 abi_ulong tuc_flags; 1417 abi_ulong tuc_link; 1418 target_stack_t tuc_stack; 1419 target_sigset_t tuc_sigmask; 1420 /* glibc uses a 1024-bit sigset_t */ 1421 char __unused[1024 / 8 - sizeof(target_sigset_t)]; 1422 /* last for future expansion */ 1423 struct target_sigcontext tuc_mcontext; 1424 }; 1425 1426 /* 1427 * Header to be used at the beginning of structures extending the user 1428 * context. Such structures must be placed after the rt_sigframe on the stack 1429 * and be 16-byte aligned. The last structure must be a dummy one with the 1430 * magic and size set to 0. 1431 */ 1432 struct target_aarch64_ctx { 1433 uint32_t magic; 1434 uint32_t size; 1435 }; 1436 1437 #define TARGET_FPSIMD_MAGIC 0x46508001 1438 1439 struct target_fpsimd_context { 1440 struct target_aarch64_ctx head; 1441 uint32_t fpsr; 1442 uint32_t fpcr; 1443 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ 1444 }; 1445 1446 #define TARGET_EXTRA_MAGIC 0x45585401 1447 1448 struct target_extra_context { 1449 struct target_aarch64_ctx head; 1450 uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */ 1451 uint32_t size; /* size in bytes of the extra space */ 1452 uint32_t reserved[3]; 1453 }; 1454 1455 #define TARGET_SVE_MAGIC 0x53564501 1456 1457 struct target_sve_context { 1458 struct target_aarch64_ctx head; 1459 uint16_t vl; 1460 uint16_t reserved[3]; 1461 /* The actual SVE data immediately follows. It is layed out 1462 * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of 1463 * the original struct pointer. 1464 */ 1465 }; 1466 1467 #define TARGET_SVE_VQ_BYTES 16 1468 1469 #define TARGET_SVE_SIG_ZREG_SIZE(VQ) ((VQ) * TARGET_SVE_VQ_BYTES) 1470 #define TARGET_SVE_SIG_PREG_SIZE(VQ) ((VQ) * (TARGET_SVE_VQ_BYTES / 8)) 1471 1472 #define TARGET_SVE_SIG_REGS_OFFSET \ 1473 QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES) 1474 #define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \ 1475 (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N)) 1476 #define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \ 1477 (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N)) 1478 #define TARGET_SVE_SIG_FFR_OFFSET(VQ) \ 1479 (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16)) 1480 #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \ 1481 (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17)) 1482 1483 struct target_rt_sigframe { 1484 struct target_siginfo info; 1485 struct target_ucontext uc; 1486 }; 1487 1488 struct target_rt_frame_record { 1489 uint64_t fp; 1490 uint64_t lr; 1491 uint32_t tramp[2]; 1492 }; 1493 1494 static void target_setup_general_frame(struct target_rt_sigframe *sf, 1495 CPUARMState *env, target_sigset_t *set) 1496 { 1497 int i; 1498 1499 __put_user(0, &sf->uc.tuc_flags); 1500 __put_user(0, &sf->uc.tuc_link); 1501 1502 __put_user(target_sigaltstack_used.ss_sp, &sf->uc.tuc_stack.ss_sp); 1503 __put_user(sas_ss_flags(env->xregs[31]), &sf->uc.tuc_stack.ss_flags); 1504 __put_user(target_sigaltstack_used.ss_size, &sf->uc.tuc_stack.ss_size); 1505 1506 for (i = 0; i < 31; i++) { 1507 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1508 } 1509 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1510 __put_user(env->pc, &sf->uc.tuc_mcontext.pc); 1511 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate); 1512 1513 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address); 1514 1515 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 1516 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]); 1517 } 1518 } 1519 1520 static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd, 1521 CPUARMState *env) 1522 { 1523 int i; 1524 1525 __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic); 1526 __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size); 1527 __put_user(vfp_get_fpsr(env), &fpsimd->fpsr); 1528 __put_user(vfp_get_fpcr(env), &fpsimd->fpcr); 1529 1530 for (i = 0; i < 32; i++) { 1531 uint64_t *q = aa64_vfp_qreg(env, i); 1532 #ifdef TARGET_WORDS_BIGENDIAN 1533 __put_user(q[0], &fpsimd->vregs[i * 2 + 1]); 1534 __put_user(q[1], &fpsimd->vregs[i * 2]); 1535 #else 1536 __put_user(q[0], &fpsimd->vregs[i * 2]); 1537 __put_user(q[1], &fpsimd->vregs[i * 2 + 1]); 1538 #endif 1539 } 1540 } 1541 1542 static void target_setup_extra_record(struct target_extra_context *extra, 1543 uint64_t datap, uint32_t extra_size) 1544 { 1545 __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic); 1546 __put_user(sizeof(struct target_extra_context), &extra->head.size); 1547 __put_user(datap, &extra->datap); 1548 __put_user(extra_size, &extra->size); 1549 } 1550 1551 static void target_setup_end_record(struct target_aarch64_ctx *end) 1552 { 1553 __put_user(0, &end->magic); 1554 __put_user(0, &end->size); 1555 } 1556 1557 static void target_setup_sve_record(struct target_sve_context *sve, 1558 CPUARMState *env, int vq, int size) 1559 { 1560 int i, j; 1561 1562 __put_user(TARGET_SVE_MAGIC, &sve->head.magic); 1563 __put_user(size, &sve->head.size); 1564 __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl); 1565 1566 /* Note that SVE regs are stored as a byte stream, with each byte element 1567 * at a subsequent address. This corresponds to a little-endian store 1568 * of our 64-bit hunks. 1569 */ 1570 for (i = 0; i < 32; ++i) { 1571 uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i); 1572 for (j = 0; j < vq * 2; ++j) { 1573 __put_user_e(env->vfp.zregs[i].d[j], z + j, le); 1574 } 1575 } 1576 for (i = 0; i <= 16; ++i) { 1577 uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i); 1578 for (j = 0; j < vq; ++j) { 1579 uint64_t r = env->vfp.pregs[i].p[j >> 2]; 1580 __put_user_e(r >> ((j & 3) * 16), p + j, le); 1581 } 1582 } 1583 } 1584 1585 static void target_restore_general_frame(CPUARMState *env, 1586 struct target_rt_sigframe *sf) 1587 { 1588 sigset_t set; 1589 uint64_t pstate; 1590 int i; 1591 1592 target_to_host_sigset(&set, &sf->uc.tuc_sigmask); 1593 set_sigmask(&set); 1594 1595 for (i = 0; i < 31; i++) { 1596 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); 1597 } 1598 1599 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp); 1600 __get_user(env->pc, &sf->uc.tuc_mcontext.pc); 1601 __get_user(pstate, &sf->uc.tuc_mcontext.pstate); 1602 pstate_write(env, pstate); 1603 } 1604 1605 static void target_restore_fpsimd_record(CPUARMState *env, 1606 struct target_fpsimd_context *fpsimd) 1607 { 1608 uint32_t fpsr, fpcr; 1609 int i; 1610 1611 __get_user(fpsr, &fpsimd->fpsr); 1612 vfp_set_fpsr(env, fpsr); 1613 __get_user(fpcr, &fpsimd->fpcr); 1614 vfp_set_fpcr(env, fpcr); 1615 1616 for (i = 0; i < 32; i++) { 1617 uint64_t *q = aa64_vfp_qreg(env, i); 1618 #ifdef TARGET_WORDS_BIGENDIAN 1619 __get_user(q[0], &fpsimd->vregs[i * 2 + 1]); 1620 __get_user(q[1], &fpsimd->vregs[i * 2]); 1621 #else 1622 __get_user(q[0], &fpsimd->vregs[i * 2]); 1623 __get_user(q[1], &fpsimd->vregs[i * 2 + 1]); 1624 #endif 1625 } 1626 } 1627 1628 static void target_restore_sve_record(CPUARMState *env, 1629 struct target_sve_context *sve, int vq) 1630 { 1631 int i, j; 1632 1633 /* Note that SVE regs are stored as a byte stream, with each byte element 1634 * at a subsequent address. This corresponds to a little-endian load 1635 * of our 64-bit hunks. 1636 */ 1637 for (i = 0; i < 32; ++i) { 1638 uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i); 1639 for (j = 0; j < vq * 2; ++j) { 1640 __get_user_e(env->vfp.zregs[i].d[j], z + j, le); 1641 } 1642 } 1643 for (i = 0; i <= 16; ++i) { 1644 uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i); 1645 for (j = 0; j < vq; ++j) { 1646 uint16_t r; 1647 __get_user_e(r, p + j, le); 1648 if (j & 3) { 1649 env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16); 1650 } else { 1651 env->vfp.pregs[i].p[j >> 2] = r; 1652 } 1653 } 1654 } 1655 } 1656 1657 static int target_restore_sigframe(CPUARMState *env, 1658 struct target_rt_sigframe *sf) 1659 { 1660 struct target_aarch64_ctx *ctx, *extra = NULL; 1661 struct target_fpsimd_context *fpsimd = NULL; 1662 struct target_sve_context *sve = NULL; 1663 uint64_t extra_datap = 0; 1664 bool used_extra = false; 1665 bool err = false; 1666 int vq = 0, sve_size = 0; 1667 1668 target_restore_general_frame(env, sf); 1669 1670 ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved; 1671 while (ctx) { 1672 uint32_t magic, size, extra_size; 1673 1674 __get_user(magic, &ctx->magic); 1675 __get_user(size, &ctx->size); 1676 switch (magic) { 1677 case 0: 1678 if (size != 0) { 1679 err = true; 1680 goto exit; 1681 } 1682 if (used_extra) { 1683 ctx = NULL; 1684 } else { 1685 ctx = extra; 1686 used_extra = true; 1687 } 1688 continue; 1689 1690 case TARGET_FPSIMD_MAGIC: 1691 if (fpsimd || size != sizeof(struct target_fpsimd_context)) { 1692 err = true; 1693 goto exit; 1694 } 1695 fpsimd = (struct target_fpsimd_context *)ctx; 1696 break; 1697 1698 case TARGET_SVE_MAGIC: 1699 if (arm_feature(env, ARM_FEATURE_SVE)) { 1700 vq = (env->vfp.zcr_el[1] & 0xf) + 1; 1701 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); 1702 if (!sve && size == sve_size) { 1703 sve = (struct target_sve_context *)ctx; 1704 break; 1705 } 1706 } 1707 err = true; 1708 goto exit; 1709 1710 case TARGET_EXTRA_MAGIC: 1711 if (extra || size != sizeof(struct target_extra_context)) { 1712 err = true; 1713 goto exit; 1714 } 1715 __get_user(extra_datap, 1716 &((struct target_extra_context *)ctx)->datap); 1717 __get_user(extra_size, 1718 &((struct target_extra_context *)ctx)->size); 1719 extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0); 1720 break; 1721 1722 default: 1723 /* Unknown record -- we certainly didn't generate it. 1724 * Did we in fact get out of sync? 1725 */ 1726 err = true; 1727 goto exit; 1728 } 1729 ctx = (void *)ctx + size; 1730 } 1731 1732 /* Require FPSIMD always. */ 1733 if (fpsimd) { 1734 target_restore_fpsimd_record(env, fpsimd); 1735 } else { 1736 err = true; 1737 } 1738 1739 /* SVE data, if present, overwrites FPSIMD data. */ 1740 if (sve) { 1741 target_restore_sve_record(env, sve, vq); 1742 } 1743 1744 exit: 1745 unlock_user(extra, extra_datap, 0); 1746 return err; 1747 } 1748 1749 static abi_ulong get_sigframe(struct target_sigaction *ka, 1750 CPUARMState *env, int size) 1751 { 1752 abi_ulong sp; 1753 1754 sp = env->xregs[31]; 1755 1756 /* 1757 * This is the X/Open sanctioned signal stack switching. 1758 */ 1759 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 1760 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 1761 } 1762 1763 sp = (sp - size) & ~15; 1764 1765 return sp; 1766 } 1767 1768 typedef struct { 1769 int total_size; 1770 int extra_base; 1771 int extra_size; 1772 int std_end_ofs; 1773 int extra_ofs; 1774 int extra_end_ofs; 1775 } target_sigframe_layout; 1776 1777 static int alloc_sigframe_space(int this_size, target_sigframe_layout *l) 1778 { 1779 /* Make sure there will always be space for the end marker. */ 1780 const int std_size = sizeof(struct target_rt_sigframe) 1781 - sizeof(struct target_aarch64_ctx); 1782 int this_loc = l->total_size; 1783 1784 if (l->extra_base) { 1785 /* Once we have begun an extra space, all allocations go there. */ 1786 l->extra_size += this_size; 1787 } else if (this_size + this_loc > std_size) { 1788 /* This allocation does not fit in the standard space. */ 1789 /* Allocate the extra record. */ 1790 l->extra_ofs = this_loc; 1791 l->total_size += sizeof(struct target_extra_context); 1792 1793 /* Allocate the standard end record. */ 1794 l->std_end_ofs = l->total_size; 1795 l->total_size += sizeof(struct target_aarch64_ctx); 1796 1797 /* Allocate the requested record. */ 1798 l->extra_base = this_loc = l->total_size; 1799 l->extra_size = this_size; 1800 } 1801 l->total_size += this_size; 1802 1803 return this_loc; 1804 } 1805 1806 static void target_setup_frame(int usig, struct target_sigaction *ka, 1807 target_siginfo_t *info, target_sigset_t *set, 1808 CPUARMState *env) 1809 { 1810 target_sigframe_layout layout = { 1811 /* Begin with the size pointing to the reserved space. */ 1812 .total_size = offsetof(struct target_rt_sigframe, 1813 uc.tuc_mcontext.__reserved), 1814 }; 1815 int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0; 1816 struct target_rt_sigframe *frame; 1817 struct target_rt_frame_record *fr; 1818 abi_ulong frame_addr, return_addr; 1819 1820 /* FPSIMD record is always in the standard space. */ 1821 fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context), 1822 &layout); 1823 1824 /* SVE state needs saving only if it exists. */ 1825 if (arm_feature(env, ARM_FEATURE_SVE)) { 1826 vq = (env->vfp.zcr_el[1] & 0xf) + 1; 1827 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); 1828 sve_ofs = alloc_sigframe_space(sve_size, &layout); 1829 } 1830 1831 if (layout.extra_ofs) { 1832 /* Reserve space for the extra end marker. The standard end marker 1833 * will have been allocated when we allocated the extra record. 1834 */ 1835 layout.extra_end_ofs 1836 = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout); 1837 } else { 1838 /* Reserve space for the standard end marker. 1839 * Do not use alloc_sigframe_space because we cheat 1840 * std_size therein to reserve space for this. 1841 */ 1842 layout.std_end_ofs = layout.total_size; 1843 layout.total_size += sizeof(struct target_aarch64_ctx); 1844 } 1845 1846 /* We must always provide at least the standard 4K reserved space, 1847 * even if we don't use all of it (this is part of the ABI) 1848 */ 1849 layout.total_size = MAX(layout.total_size, 1850 sizeof(struct target_rt_sigframe)); 1851 1852 /* Reserve space for the return code. On a real system this would 1853 * be within the VDSO. So, despite the name this is not a "real" 1854 * record within the frame. 1855 */ 1856 fr_ofs = layout.total_size; 1857 layout.total_size += sizeof(struct target_rt_frame_record); 1858 1859 frame_addr = get_sigframe(ka, env, layout.total_size); 1860 trace_user_setup_frame(env, frame_addr); 1861 frame = lock_user(VERIFY_WRITE, frame_addr, layout.total_size, 0); 1862 if (!frame) { 1863 goto give_sigsegv; 1864 } 1865 1866 target_setup_general_frame(frame, env, set); 1867 target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env); 1868 target_setup_end_record((void *)frame + layout.std_end_ofs); 1869 if (layout.extra_ofs) { 1870 target_setup_extra_record((void *)frame + layout.extra_ofs, 1871 frame_addr + layout.extra_base, 1872 layout.extra_size); 1873 target_setup_end_record((void *)frame + layout.extra_end_ofs); 1874 } 1875 if (sve_ofs) { 1876 target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size); 1877 } 1878 1879 /* Set up the stack frame for unwinding. */ 1880 fr = (void *)frame + fr_ofs; 1881 __put_user(env->xregs[29], &fr->fp); 1882 __put_user(env->xregs[30], &fr->lr); 1883 1884 if (ka->sa_flags & TARGET_SA_RESTORER) { 1885 return_addr = ka->sa_restorer; 1886 } else { 1887 /* 1888 * mov x8,#__NR_rt_sigreturn; svc #0 1889 * Since these are instructions they need to be put as little-endian 1890 * regardless of target default or current CPU endianness. 1891 */ 1892 __put_user_e(0xd2801168, &fr->tramp[0], le); 1893 __put_user_e(0xd4000001, &fr->tramp[1], le); 1894 return_addr = frame_addr + fr_ofs 1895 + offsetof(struct target_rt_frame_record, tramp); 1896 } 1897 env->xregs[0] = usig; 1898 env->xregs[31] = frame_addr; 1899 env->xregs[29] = frame_addr + fr_ofs; 1900 env->pc = ka->_sa_handler; 1901 env->xregs[30] = return_addr; 1902 if (info) { 1903 tswap_siginfo(&frame->info, info); 1904 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); 1905 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 1906 } 1907 1908 unlock_user(frame, frame_addr, layout.total_size); 1909 return; 1910 1911 give_sigsegv: 1912 unlock_user(frame, frame_addr, layout.total_size); 1913 force_sigsegv(usig); 1914 } 1915 1916 static void setup_rt_frame(int sig, struct target_sigaction *ka, 1917 target_siginfo_t *info, target_sigset_t *set, 1918 CPUARMState *env) 1919 { 1920 target_setup_frame(sig, ka, info, set, env); 1921 } 1922 1923 static void setup_frame(int sig, struct target_sigaction *ka, 1924 target_sigset_t *set, CPUARMState *env) 1925 { 1926 target_setup_frame(sig, ka, 0, set, env); 1927 } 1928 1929 long do_rt_sigreturn(CPUARMState *env) 1930 { 1931 struct target_rt_sigframe *frame = NULL; 1932 abi_ulong frame_addr = env->xregs[31]; 1933 1934 trace_user_do_rt_sigreturn(env, frame_addr); 1935 if (frame_addr & 15) { 1936 goto badframe; 1937 } 1938 1939 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 1940 goto badframe; 1941 } 1942 1943 if (target_restore_sigframe(env, frame)) { 1944 goto badframe; 1945 } 1946 1947 if (do_sigaltstack(frame_addr + 1948 offsetof(struct target_rt_sigframe, uc.tuc_stack), 1949 0, get_sp_from_cpustate(env)) == -EFAULT) { 1950 goto badframe; 1951 } 1952 1953 unlock_user_struct(frame, frame_addr, 0); 1954 return -TARGET_QEMU_ESIGRETURN; 1955 1956 badframe: 1957 unlock_user_struct(frame, frame_addr, 0); 1958 force_sig(TARGET_SIGSEGV); 1959 return -TARGET_QEMU_ESIGRETURN; 1960 } 1961 1962 long do_sigreturn(CPUARMState *env) 1963 { 1964 return do_rt_sigreturn(env); 1965 } 1966 1967 #elif defined(TARGET_ARM) 1968 1969 struct target_sigcontext { 1970 abi_ulong trap_no; 1971 abi_ulong error_code; 1972 abi_ulong oldmask; 1973 abi_ulong arm_r0; 1974 abi_ulong arm_r1; 1975 abi_ulong arm_r2; 1976 abi_ulong arm_r3; 1977 abi_ulong arm_r4; 1978 abi_ulong arm_r5; 1979 abi_ulong arm_r6; 1980 abi_ulong arm_r7; 1981 abi_ulong arm_r8; 1982 abi_ulong arm_r9; 1983 abi_ulong arm_r10; 1984 abi_ulong arm_fp; 1985 abi_ulong arm_ip; 1986 abi_ulong arm_sp; 1987 abi_ulong arm_lr; 1988 abi_ulong arm_pc; 1989 abi_ulong arm_cpsr; 1990 abi_ulong fault_address; 1991 }; 1992 1993 struct target_ucontext_v1 { 1994 abi_ulong tuc_flags; 1995 abi_ulong tuc_link; 1996 target_stack_t tuc_stack; 1997 struct target_sigcontext tuc_mcontext; 1998 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 1999 }; 2000 2001 struct target_ucontext_v2 { 2002 abi_ulong tuc_flags; 2003 abi_ulong tuc_link; 2004 target_stack_t tuc_stack; 2005 struct target_sigcontext tuc_mcontext; 2006 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 2007 char __unused[128 - sizeof(target_sigset_t)]; 2008 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8))); 2009 }; 2010 2011 struct target_user_vfp { 2012 uint64_t fpregs[32]; 2013 abi_ulong fpscr; 2014 }; 2015 2016 struct target_user_vfp_exc { 2017 abi_ulong fpexc; 2018 abi_ulong fpinst; 2019 abi_ulong fpinst2; 2020 }; 2021 2022 struct target_vfp_sigframe { 2023 abi_ulong magic; 2024 abi_ulong size; 2025 struct target_user_vfp ufp; 2026 struct target_user_vfp_exc ufp_exc; 2027 } __attribute__((__aligned__(8))); 2028 2029 struct target_iwmmxt_sigframe { 2030 abi_ulong magic; 2031 abi_ulong size; 2032 uint64_t regs[16]; 2033 /* Note that not all the coprocessor control registers are stored here */ 2034 uint32_t wcssf; 2035 uint32_t wcasf; 2036 uint32_t wcgr0; 2037 uint32_t wcgr1; 2038 uint32_t wcgr2; 2039 uint32_t wcgr3; 2040 } __attribute__((__aligned__(8))); 2041 2042 #define TARGET_VFP_MAGIC 0x56465001 2043 #define TARGET_IWMMXT_MAGIC 0x12ef842a 2044 2045 struct sigframe_v1 2046 { 2047 struct target_sigcontext sc; 2048 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 2049 abi_ulong retcode; 2050 }; 2051 2052 struct sigframe_v2 2053 { 2054 struct target_ucontext_v2 uc; 2055 abi_ulong retcode; 2056 }; 2057 2058 struct rt_sigframe_v1 2059 { 2060 abi_ulong pinfo; 2061 abi_ulong puc; 2062 struct target_siginfo info; 2063 struct target_ucontext_v1 uc; 2064 abi_ulong retcode; 2065 }; 2066 2067 struct rt_sigframe_v2 2068 { 2069 struct target_siginfo info; 2070 struct target_ucontext_v2 uc; 2071 abi_ulong retcode; 2072 }; 2073 2074 #define TARGET_CONFIG_CPU_32 1 2075 2076 /* 2077 * For ARM syscalls, we encode the syscall number into the instruction. 2078 */ 2079 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) 2080 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) 2081 2082 /* 2083 * For Thumb syscalls, we pass the syscall number via r7. We therefore 2084 * need two 16-bit instructions. 2085 */ 2086 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) 2087 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) 2088 2089 static const abi_ulong retcodes[4] = { 2090 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, 2091 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN 2092 }; 2093 2094 2095 static inline int valid_user_regs(CPUARMState *regs) 2096 { 2097 return 1; 2098 } 2099 2100 static void 2101 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2102 CPUARMState *env, abi_ulong mask) 2103 { 2104 __put_user(env->regs[0], &sc->arm_r0); 2105 __put_user(env->regs[1], &sc->arm_r1); 2106 __put_user(env->regs[2], &sc->arm_r2); 2107 __put_user(env->regs[3], &sc->arm_r3); 2108 __put_user(env->regs[4], &sc->arm_r4); 2109 __put_user(env->regs[5], &sc->arm_r5); 2110 __put_user(env->regs[6], &sc->arm_r6); 2111 __put_user(env->regs[7], &sc->arm_r7); 2112 __put_user(env->regs[8], &sc->arm_r8); 2113 __put_user(env->regs[9], &sc->arm_r9); 2114 __put_user(env->regs[10], &sc->arm_r10); 2115 __put_user(env->regs[11], &sc->arm_fp); 2116 __put_user(env->regs[12], &sc->arm_ip); 2117 __put_user(env->regs[13], &sc->arm_sp); 2118 __put_user(env->regs[14], &sc->arm_lr); 2119 __put_user(env->regs[15], &sc->arm_pc); 2120 #ifdef TARGET_CONFIG_CPU_32 2121 __put_user(cpsr_read(env), &sc->arm_cpsr); 2122 #endif 2123 2124 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no); 2125 __put_user(/* current->thread.error_code */ 0, &sc->error_code); 2126 __put_user(/* current->thread.address */ 0, &sc->fault_address); 2127 __put_user(mask, &sc->oldmask); 2128 } 2129 2130 static inline abi_ulong 2131 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) 2132 { 2133 unsigned long sp = regs->regs[13]; 2134 2135 /* 2136 * This is the X/Open sanctioned signal stack switching. 2137 */ 2138 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) { 2139 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2140 } 2141 /* 2142 * ATPCS B01 mandates 8-byte alignment 2143 */ 2144 return (sp - framesize) & ~7; 2145 } 2146 2147 static void 2148 setup_return(CPUARMState *env, struct target_sigaction *ka, 2149 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) 2150 { 2151 abi_ulong handler = ka->_sa_handler; 2152 abi_ulong retcode; 2153 int thumb = handler & 1; 2154 uint32_t cpsr = cpsr_read(env); 2155 2156 cpsr &= ~CPSR_IT; 2157 if (thumb) { 2158 cpsr |= CPSR_T; 2159 } else { 2160 cpsr &= ~CPSR_T; 2161 } 2162 2163 if (ka->sa_flags & TARGET_SA_RESTORER) { 2164 retcode = ka->sa_restorer; 2165 } else { 2166 unsigned int idx = thumb; 2167 2168 if (ka->sa_flags & TARGET_SA_SIGINFO) { 2169 idx += 2; 2170 } 2171 2172 __put_user(retcodes[idx], rc); 2173 2174 retcode = rc_addr + thumb; 2175 } 2176 2177 env->regs[0] = usig; 2178 env->regs[13] = frame_addr; 2179 env->regs[14] = retcode; 2180 env->regs[15] = handler & (thumb ? ~1 : ~3); 2181 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr); 2182 } 2183 2184 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) 2185 { 2186 int i; 2187 struct target_vfp_sigframe *vfpframe; 2188 vfpframe = (struct target_vfp_sigframe *)regspace; 2189 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic); 2190 __put_user(sizeof(*vfpframe), &vfpframe->size); 2191 for (i = 0; i < 32; i++) { 2192 __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]); 2193 } 2194 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr); 2195 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc); 2196 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2197 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2198 return (abi_ulong*)(vfpframe+1); 2199 } 2200 2201 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, 2202 CPUARMState *env) 2203 { 2204 int i; 2205 struct target_iwmmxt_sigframe *iwmmxtframe; 2206 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2207 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic); 2208 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size); 2209 for (i = 0; i < 16; i++) { 2210 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2211 } 2212 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2213 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 2214 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2215 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2216 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2217 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2218 return (abi_ulong*)(iwmmxtframe+1); 2219 } 2220 2221 static void setup_sigframe_v2(struct target_ucontext_v2 *uc, 2222 target_sigset_t *set, CPUARMState *env) 2223 { 2224 struct target_sigaltstack stack; 2225 int i; 2226 abi_ulong *regspace; 2227 2228 /* Clear all the bits of the ucontext we don't use. */ 2229 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext)); 2230 2231 memset(&stack, 0, sizeof(stack)); 2232 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 2233 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 2234 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 2235 memcpy(&uc->tuc_stack, &stack, sizeof(stack)); 2236 2237 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]); 2238 /* Save coprocessor signal frame. */ 2239 regspace = uc->tuc_regspace; 2240 if (arm_feature(env, ARM_FEATURE_VFP)) { 2241 regspace = setup_sigframe_v2_vfp(regspace, env); 2242 } 2243 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2244 regspace = setup_sigframe_v2_iwmmxt(regspace, env); 2245 } 2246 2247 /* Write terminating magic word */ 2248 __put_user(0, regspace); 2249 2250 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 2251 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]); 2252 } 2253 } 2254 2255 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */ 2256 static void setup_frame_v1(int usig, struct target_sigaction *ka, 2257 target_sigset_t *set, CPUARMState *regs) 2258 { 2259 struct sigframe_v1 *frame; 2260 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 2261 int i; 2262 2263 trace_user_setup_frame(regs, frame_addr); 2264 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2265 goto sigsegv; 2266 } 2267 2268 setup_sigcontext(&frame->sc, regs, set->sig[0]); 2269 2270 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2271 __put_user(set->sig[i], &frame->extramask[i - 1]); 2272 } 2273 2274 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 2275 frame_addr + offsetof(struct sigframe_v1, retcode)); 2276 2277 unlock_user_struct(frame, frame_addr, 1); 2278 return; 2279 sigsegv: 2280 force_sigsegv(usig); 2281 } 2282 2283 static void setup_frame_v2(int usig, struct target_sigaction *ka, 2284 target_sigset_t *set, CPUARMState *regs) 2285 { 2286 struct sigframe_v2 *frame; 2287 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 2288 2289 trace_user_setup_frame(regs, frame_addr); 2290 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2291 goto sigsegv; 2292 } 2293 2294 setup_sigframe_v2(&frame->uc, set, regs); 2295 2296 setup_return(regs, ka, &frame->retcode, frame_addr, usig, 2297 frame_addr + offsetof(struct sigframe_v2, retcode)); 2298 2299 unlock_user_struct(frame, frame_addr, 1); 2300 return; 2301 sigsegv: 2302 force_sigsegv(usig); 2303 } 2304 2305 static void setup_frame(int usig, struct target_sigaction *ka, 2306 target_sigset_t *set, CPUARMState *regs) 2307 { 2308 if (get_osversion() >= 0x020612) { 2309 setup_frame_v2(usig, ka, set, regs); 2310 } else { 2311 setup_frame_v1(usig, ka, set, regs); 2312 } 2313 } 2314 2315 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ 2316 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, 2317 target_siginfo_t *info, 2318 target_sigset_t *set, CPUARMState *env) 2319 { 2320 struct rt_sigframe_v1 *frame; 2321 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 2322 struct target_sigaltstack stack; 2323 int i; 2324 abi_ulong info_addr, uc_addr; 2325 2326 trace_user_setup_rt_frame(env, frame_addr); 2327 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2328 goto sigsegv; 2329 } 2330 2331 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); 2332 __put_user(info_addr, &frame->pinfo); 2333 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); 2334 __put_user(uc_addr, &frame->puc); 2335 tswap_siginfo(&frame->info, info); 2336 2337 /* Clear all the bits of the ucontext we don't use. */ 2338 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); 2339 2340 memset(&stack, 0, sizeof(stack)); 2341 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); 2342 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); 2343 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); 2344 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); 2345 2346 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); 2347 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 2348 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 2349 } 2350 2351 setup_return(env, ka, &frame->retcode, frame_addr, usig, 2352 frame_addr + offsetof(struct rt_sigframe_v1, retcode)); 2353 2354 env->regs[1] = info_addr; 2355 env->regs[2] = uc_addr; 2356 2357 unlock_user_struct(frame, frame_addr, 1); 2358 return; 2359 sigsegv: 2360 force_sigsegv(usig); 2361 } 2362 2363 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, 2364 target_siginfo_t *info, 2365 target_sigset_t *set, CPUARMState *env) 2366 { 2367 struct rt_sigframe_v2 *frame; 2368 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); 2369 abi_ulong info_addr, uc_addr; 2370 2371 trace_user_setup_rt_frame(env, frame_addr); 2372 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 2373 goto sigsegv; 2374 } 2375 2376 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); 2377 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); 2378 tswap_siginfo(&frame->info, info); 2379 2380 setup_sigframe_v2(&frame->uc, set, env); 2381 2382 setup_return(env, ka, &frame->retcode, frame_addr, usig, 2383 frame_addr + offsetof(struct rt_sigframe_v2, retcode)); 2384 2385 env->regs[1] = info_addr; 2386 env->regs[2] = uc_addr; 2387 2388 unlock_user_struct(frame, frame_addr, 1); 2389 return; 2390 sigsegv: 2391 force_sigsegv(usig); 2392 } 2393 2394 static void setup_rt_frame(int usig, struct target_sigaction *ka, 2395 target_siginfo_t *info, 2396 target_sigset_t *set, CPUARMState *env) 2397 { 2398 if (get_osversion() >= 0x020612) { 2399 setup_rt_frame_v2(usig, ka, info, set, env); 2400 } else { 2401 setup_rt_frame_v1(usig, ka, info, set, env); 2402 } 2403 } 2404 2405 static int 2406 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) 2407 { 2408 int err = 0; 2409 uint32_t cpsr; 2410 2411 __get_user(env->regs[0], &sc->arm_r0); 2412 __get_user(env->regs[1], &sc->arm_r1); 2413 __get_user(env->regs[2], &sc->arm_r2); 2414 __get_user(env->regs[3], &sc->arm_r3); 2415 __get_user(env->regs[4], &sc->arm_r4); 2416 __get_user(env->regs[5], &sc->arm_r5); 2417 __get_user(env->regs[6], &sc->arm_r6); 2418 __get_user(env->regs[7], &sc->arm_r7); 2419 __get_user(env->regs[8], &sc->arm_r8); 2420 __get_user(env->regs[9], &sc->arm_r9); 2421 __get_user(env->regs[10], &sc->arm_r10); 2422 __get_user(env->regs[11], &sc->arm_fp); 2423 __get_user(env->regs[12], &sc->arm_ip); 2424 __get_user(env->regs[13], &sc->arm_sp); 2425 __get_user(env->regs[14], &sc->arm_lr); 2426 __get_user(env->regs[15], &sc->arm_pc); 2427 #ifdef TARGET_CONFIG_CPU_32 2428 __get_user(cpsr, &sc->arm_cpsr); 2429 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); 2430 #endif 2431 2432 err |= !valid_user_regs(env); 2433 2434 return err; 2435 } 2436 2437 static long do_sigreturn_v1(CPUARMState *env) 2438 { 2439 abi_ulong frame_addr; 2440 struct sigframe_v1 *frame = NULL; 2441 target_sigset_t set; 2442 sigset_t host_set; 2443 int i; 2444 2445 /* 2446 * Since we stacked the signal on a 64-bit boundary, 2447 * then 'sp' should be word aligned here. If it's 2448 * not, then the user is trying to mess with us. 2449 */ 2450 frame_addr = env->regs[13]; 2451 trace_user_do_sigreturn(env, frame_addr); 2452 if (frame_addr & 7) { 2453 goto badframe; 2454 } 2455 2456 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2457 goto badframe; 2458 } 2459 2460 __get_user(set.sig[0], &frame->sc.oldmask); 2461 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 2462 __get_user(set.sig[i], &frame->extramask[i - 1]); 2463 } 2464 2465 target_to_host_sigset_internal(&host_set, &set); 2466 set_sigmask(&host_set); 2467 2468 if (restore_sigcontext(env, &frame->sc)) { 2469 goto badframe; 2470 } 2471 2472 #if 0 2473 /* Send SIGTRAP if we're single-stepping */ 2474 if (ptrace_cancel_bpt(current)) 2475 send_sig(SIGTRAP, current, 1); 2476 #endif 2477 unlock_user_struct(frame, frame_addr, 0); 2478 return -TARGET_QEMU_ESIGRETURN; 2479 2480 badframe: 2481 force_sig(TARGET_SIGSEGV); 2482 return -TARGET_QEMU_ESIGRETURN; 2483 } 2484 2485 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) 2486 { 2487 int i; 2488 abi_ulong magic, sz; 2489 uint32_t fpscr, fpexc; 2490 struct target_vfp_sigframe *vfpframe; 2491 vfpframe = (struct target_vfp_sigframe *)regspace; 2492 2493 __get_user(magic, &vfpframe->magic); 2494 __get_user(sz, &vfpframe->size); 2495 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) { 2496 return 0; 2497 } 2498 for (i = 0; i < 32; i++) { 2499 __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]); 2500 } 2501 __get_user(fpscr, &vfpframe->ufp.fpscr); 2502 vfp_set_fpscr(env, fpscr); 2503 __get_user(fpexc, &vfpframe->ufp_exc.fpexc); 2504 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid 2505 * and the exception flag is cleared 2506 */ 2507 fpexc |= (1 << 30); 2508 fpexc &= ~((1 << 31) | (1 << 28)); 2509 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc; 2510 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst); 2511 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2); 2512 return (abi_ulong*)(vfpframe + 1); 2513 } 2514 2515 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, 2516 abi_ulong *regspace) 2517 { 2518 int i; 2519 abi_ulong magic, sz; 2520 struct target_iwmmxt_sigframe *iwmmxtframe; 2521 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; 2522 2523 __get_user(magic, &iwmmxtframe->magic); 2524 __get_user(sz, &iwmmxtframe->size); 2525 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { 2526 return 0; 2527 } 2528 for (i = 0; i < 16; i++) { 2529 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); 2530 } 2531 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); 2532 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); 2533 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); 2534 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); 2535 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); 2536 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); 2537 return (abi_ulong*)(iwmmxtframe + 1); 2538 } 2539 2540 static int do_sigframe_return_v2(CPUARMState *env, 2541 target_ulong context_addr, 2542 struct target_ucontext_v2 *uc) 2543 { 2544 sigset_t host_set; 2545 abi_ulong *regspace; 2546 2547 target_to_host_sigset(&host_set, &uc->tuc_sigmask); 2548 set_sigmask(&host_set); 2549 2550 if (restore_sigcontext(env, &uc->tuc_mcontext)) 2551 return 1; 2552 2553 /* Restore coprocessor signal frame */ 2554 regspace = uc->tuc_regspace; 2555 if (arm_feature(env, ARM_FEATURE_VFP)) { 2556 regspace = restore_sigframe_v2_vfp(env, regspace); 2557 if (!regspace) { 2558 return 1; 2559 } 2560 } 2561 if (arm_feature(env, ARM_FEATURE_IWMMXT)) { 2562 regspace = restore_sigframe_v2_iwmmxt(env, regspace); 2563 if (!regspace) { 2564 return 1; 2565 } 2566 } 2567 2568 if (do_sigaltstack(context_addr 2569 + offsetof(struct target_ucontext_v2, tuc_stack), 2570 0, get_sp_from_cpustate(env)) == -EFAULT) { 2571 return 1; 2572 } 2573 2574 #if 0 2575 /* Send SIGTRAP if we're single-stepping */ 2576 if (ptrace_cancel_bpt(current)) 2577 send_sig(SIGTRAP, current, 1); 2578 #endif 2579 2580 return 0; 2581 } 2582 2583 static long do_sigreturn_v2(CPUARMState *env) 2584 { 2585 abi_ulong frame_addr; 2586 struct sigframe_v2 *frame = NULL; 2587 2588 /* 2589 * Since we stacked the signal on a 64-bit boundary, 2590 * then 'sp' should be word aligned here. If it's 2591 * not, then the user is trying to mess with us. 2592 */ 2593 frame_addr = env->regs[13]; 2594 trace_user_do_sigreturn(env, frame_addr); 2595 if (frame_addr & 7) { 2596 goto badframe; 2597 } 2598 2599 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2600 goto badframe; 2601 } 2602 2603 if (do_sigframe_return_v2(env, 2604 frame_addr 2605 + offsetof(struct sigframe_v2, uc), 2606 &frame->uc)) { 2607 goto badframe; 2608 } 2609 2610 unlock_user_struct(frame, frame_addr, 0); 2611 return -TARGET_QEMU_ESIGRETURN; 2612 2613 badframe: 2614 unlock_user_struct(frame, frame_addr, 0); 2615 force_sig(TARGET_SIGSEGV); 2616 return -TARGET_QEMU_ESIGRETURN; 2617 } 2618 2619 long do_sigreturn(CPUARMState *env) 2620 { 2621 if (get_osversion() >= 0x020612) { 2622 return do_sigreturn_v2(env); 2623 } else { 2624 return do_sigreturn_v1(env); 2625 } 2626 } 2627 2628 static long do_rt_sigreturn_v1(CPUARMState *env) 2629 { 2630 abi_ulong frame_addr; 2631 struct rt_sigframe_v1 *frame = NULL; 2632 sigset_t host_set; 2633 2634 /* 2635 * Since we stacked the signal on a 64-bit boundary, 2636 * then 'sp' should be word aligned here. If it's 2637 * not, then the user is trying to mess with us. 2638 */ 2639 frame_addr = env->regs[13]; 2640 trace_user_do_rt_sigreturn(env, frame_addr); 2641 if (frame_addr & 7) { 2642 goto badframe; 2643 } 2644 2645 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2646 goto badframe; 2647 } 2648 2649 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); 2650 set_sigmask(&host_set); 2651 2652 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { 2653 goto badframe; 2654 } 2655 2656 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) 2657 goto badframe; 2658 2659 #if 0 2660 /* Send SIGTRAP if we're single-stepping */ 2661 if (ptrace_cancel_bpt(current)) 2662 send_sig(SIGTRAP, current, 1); 2663 #endif 2664 unlock_user_struct(frame, frame_addr, 0); 2665 return -TARGET_QEMU_ESIGRETURN; 2666 2667 badframe: 2668 unlock_user_struct(frame, frame_addr, 0); 2669 force_sig(TARGET_SIGSEGV); 2670 return -TARGET_QEMU_ESIGRETURN; 2671 } 2672 2673 static long do_rt_sigreturn_v2(CPUARMState *env) 2674 { 2675 abi_ulong frame_addr; 2676 struct rt_sigframe_v2 *frame = NULL; 2677 2678 /* 2679 * Since we stacked the signal on a 64-bit boundary, 2680 * then 'sp' should be word aligned here. If it's 2681 * not, then the user is trying to mess with us. 2682 */ 2683 frame_addr = env->regs[13]; 2684 trace_user_do_rt_sigreturn(env, frame_addr); 2685 if (frame_addr & 7) { 2686 goto badframe; 2687 } 2688 2689 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 2690 goto badframe; 2691 } 2692 2693 if (do_sigframe_return_v2(env, 2694 frame_addr 2695 + offsetof(struct rt_sigframe_v2, uc), 2696 &frame->uc)) { 2697 goto badframe; 2698 } 2699 2700 unlock_user_struct(frame, frame_addr, 0); 2701 return -TARGET_QEMU_ESIGRETURN; 2702 2703 badframe: 2704 unlock_user_struct(frame, frame_addr, 0); 2705 force_sig(TARGET_SIGSEGV); 2706 return -TARGET_QEMU_ESIGRETURN; 2707 } 2708 2709 long do_rt_sigreturn(CPUARMState *env) 2710 { 2711 if (get_osversion() >= 0x020612) { 2712 return do_rt_sigreturn_v2(env); 2713 } else { 2714 return do_rt_sigreturn_v1(env); 2715 } 2716 } 2717 2718 #elif defined(TARGET_SPARC) 2719 2720 #define __SUNOS_MAXWIN 31 2721 2722 /* This is what SunOS does, so shall I. */ 2723 struct target_sigcontext { 2724 abi_ulong sigc_onstack; /* state to restore */ 2725 2726 abi_ulong sigc_mask; /* sigmask to restore */ 2727 abi_ulong sigc_sp; /* stack pointer */ 2728 abi_ulong sigc_pc; /* program counter */ 2729 abi_ulong sigc_npc; /* next program counter */ 2730 abi_ulong sigc_psr; /* for condition codes etc */ 2731 abi_ulong sigc_g1; /* User uses these two registers */ 2732 abi_ulong sigc_o0; /* within the trampoline code. */ 2733 2734 /* Now comes information regarding the users window set 2735 * at the time of the signal. 2736 */ 2737 abi_ulong sigc_oswins; /* outstanding windows */ 2738 2739 /* stack ptrs for each regwin buf */ 2740 char *sigc_spbuf[__SUNOS_MAXWIN]; 2741 2742 /* Windows to restore after signal */ 2743 struct { 2744 abi_ulong locals[8]; 2745 abi_ulong ins[8]; 2746 } sigc_wbuf[__SUNOS_MAXWIN]; 2747 }; 2748 /* A Sparc stack frame */ 2749 struct sparc_stackf { 2750 abi_ulong locals[8]; 2751 abi_ulong ins[8]; 2752 /* It's simpler to treat fp and callers_pc as elements of ins[] 2753 * since we never need to access them ourselves. 2754 */ 2755 char *structptr; 2756 abi_ulong xargs[6]; 2757 abi_ulong xxargs[1]; 2758 }; 2759 2760 typedef struct { 2761 struct { 2762 abi_ulong psr; 2763 abi_ulong pc; 2764 abi_ulong npc; 2765 abi_ulong y; 2766 abi_ulong u_regs[16]; /* globals and ins */ 2767 } si_regs; 2768 int si_mask; 2769 } __siginfo_t; 2770 2771 typedef struct { 2772 abi_ulong si_float_regs[32]; 2773 unsigned long si_fsr; 2774 unsigned long si_fpqdepth; 2775 struct { 2776 unsigned long *insn_addr; 2777 unsigned long insn; 2778 } si_fpqueue [16]; 2779 } qemu_siginfo_fpu_t; 2780 2781 2782 struct target_signal_frame { 2783 struct sparc_stackf ss; 2784 __siginfo_t info; 2785 abi_ulong fpu_save; 2786 abi_ulong insns[2] __attribute__ ((aligned (8))); 2787 abi_ulong extramask[TARGET_NSIG_WORDS - 1]; 2788 abi_ulong extra_size; /* Should be 0 */ 2789 qemu_siginfo_fpu_t fpu_state; 2790 }; 2791 struct target_rt_signal_frame { 2792 struct sparc_stackf ss; 2793 siginfo_t info; 2794 abi_ulong regs[20]; 2795 sigset_t mask; 2796 abi_ulong fpu_save; 2797 unsigned int insns[2]; 2798 stack_t stack; 2799 unsigned int extra_size; /* Should be 0 */ 2800 qemu_siginfo_fpu_t fpu_state; 2801 }; 2802 2803 #define UREG_O0 16 2804 #define UREG_O6 22 2805 #define UREG_I0 0 2806 #define UREG_I1 1 2807 #define UREG_I2 2 2808 #define UREG_I3 3 2809 #define UREG_I4 4 2810 #define UREG_I5 5 2811 #define UREG_I6 6 2812 #define UREG_I7 7 2813 #define UREG_L0 8 2814 #define UREG_FP UREG_I6 2815 #define UREG_SP UREG_O6 2816 2817 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 2818 CPUSPARCState *env, 2819 unsigned long framesize) 2820 { 2821 abi_ulong sp; 2822 2823 sp = env->regwptr[UREG_FP]; 2824 2825 /* This is the X/Open sanctioned signal stack switching. */ 2826 if (sa->sa_flags & TARGET_SA_ONSTACK) { 2827 if (!on_sig_stack(sp) 2828 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) { 2829 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 2830 } 2831 } 2832 return sp - framesize; 2833 } 2834 2835 static int 2836 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask) 2837 { 2838 int err = 0, i; 2839 2840 __put_user(env->psr, &si->si_regs.psr); 2841 __put_user(env->pc, &si->si_regs.pc); 2842 __put_user(env->npc, &si->si_regs.npc); 2843 __put_user(env->y, &si->si_regs.y); 2844 for (i=0; i < 8; i++) { 2845 __put_user(env->gregs[i], &si->si_regs.u_regs[i]); 2846 } 2847 for (i=0; i < 8; i++) { 2848 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]); 2849 } 2850 __put_user(mask, &si->si_mask); 2851 return err; 2852 } 2853 2854 #if 0 2855 static int 2856 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/ 2857 CPUSPARCState *env, unsigned long mask) 2858 { 2859 int err = 0; 2860 2861 __put_user(mask, &sc->sigc_mask); 2862 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp); 2863 __put_user(env->pc, &sc->sigc_pc); 2864 __put_user(env->npc, &sc->sigc_npc); 2865 __put_user(env->psr, &sc->sigc_psr); 2866 __put_user(env->gregs[1], &sc->sigc_g1); 2867 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0); 2868 2869 return err; 2870 } 2871 #endif 2872 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7))) 2873 2874 static void setup_frame(int sig, struct target_sigaction *ka, 2875 target_sigset_t *set, CPUSPARCState *env) 2876 { 2877 abi_ulong sf_addr; 2878 struct target_signal_frame *sf; 2879 int sigframe_size, err, i; 2880 2881 /* 1. Make sure everything is clean */ 2882 //synchronize_user_stack(); 2883 2884 sigframe_size = NF_ALIGNEDSZ; 2885 sf_addr = get_sigframe(ka, env, sigframe_size); 2886 trace_user_setup_frame(env, sf_addr); 2887 2888 sf = lock_user(VERIFY_WRITE, sf_addr, 2889 sizeof(struct target_signal_frame), 0); 2890 if (!sf) { 2891 goto sigsegv; 2892 } 2893 #if 0 2894 if (invalid_frame_pointer(sf, sigframe_size)) 2895 goto sigill_and_return; 2896 #endif 2897 /* 2. Save the current process state */ 2898 err = setup___siginfo(&sf->info, env, set->sig[0]); 2899 __put_user(0, &sf->extra_size); 2900 2901 //save_fpu_state(regs, &sf->fpu_state); 2902 //__put_user(&sf->fpu_state, &sf->fpu_save); 2903 2904 __put_user(set->sig[0], &sf->info.si_mask); 2905 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 2906 __put_user(set->sig[i + 1], &sf->extramask[i]); 2907 } 2908 2909 for (i = 0; i < 8; i++) { 2910 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]); 2911 } 2912 for (i = 0; i < 8; i++) { 2913 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]); 2914 } 2915 if (err) 2916 goto sigsegv; 2917 2918 /* 3. signal handler back-trampoline and parameters */ 2919 env->regwptr[UREG_FP] = sf_addr; 2920 env->regwptr[UREG_I0] = sig; 2921 env->regwptr[UREG_I1] = sf_addr + 2922 offsetof(struct target_signal_frame, info); 2923 env->regwptr[UREG_I2] = sf_addr + 2924 offsetof(struct target_signal_frame, info); 2925 2926 /* 4. signal handler */ 2927 env->pc = ka->_sa_handler; 2928 env->npc = (env->pc + 4); 2929 /* 5. return to kernel instructions */ 2930 if (ka->ka_restorer) { 2931 env->regwptr[UREG_I7] = ka->ka_restorer; 2932 } else { 2933 uint32_t val32; 2934 2935 env->regwptr[UREG_I7] = sf_addr + 2936 offsetof(struct target_signal_frame, insns) - 2 * 4; 2937 2938 /* mov __NR_sigreturn, %g1 */ 2939 val32 = 0x821020d8; 2940 __put_user(val32, &sf->insns[0]); 2941 2942 /* t 0x10 */ 2943 val32 = 0x91d02010; 2944 __put_user(val32, &sf->insns[1]); 2945 if (err) 2946 goto sigsegv; 2947 2948 /* Flush instruction space. */ 2949 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 2950 // tb_flush(env); 2951 } 2952 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2953 return; 2954 #if 0 2955 sigill_and_return: 2956 force_sig(TARGET_SIGILL); 2957 #endif 2958 sigsegv: 2959 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); 2960 force_sigsegv(sig); 2961 } 2962 2963 static void setup_rt_frame(int sig, struct target_sigaction *ka, 2964 target_siginfo_t *info, 2965 target_sigset_t *set, CPUSPARCState *env) 2966 { 2967 fprintf(stderr, "setup_rt_frame: not implemented\n"); 2968 } 2969 2970 long do_sigreturn(CPUSPARCState *env) 2971 { 2972 abi_ulong sf_addr; 2973 struct target_signal_frame *sf; 2974 uint32_t up_psr, pc, npc; 2975 target_sigset_t set; 2976 sigset_t host_set; 2977 int err=0, i; 2978 2979 sf_addr = env->regwptr[UREG_FP]; 2980 trace_user_do_sigreturn(env, sf_addr); 2981 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) { 2982 goto segv_and_exit; 2983 } 2984 2985 /* 1. Make sure we are not getting garbage from the user */ 2986 2987 if (sf_addr & 3) 2988 goto segv_and_exit; 2989 2990 __get_user(pc, &sf->info.si_regs.pc); 2991 __get_user(npc, &sf->info.si_regs.npc); 2992 2993 if ((pc | npc) & 3) { 2994 goto segv_and_exit; 2995 } 2996 2997 /* 2. Restore the state */ 2998 __get_user(up_psr, &sf->info.si_regs.psr); 2999 3000 /* User can only change condition codes and FPU enabling in %psr. */ 3001 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */)) 3002 | (env->psr & ~(PSR_ICC /* | PSR_EF */)); 3003 3004 env->pc = pc; 3005 env->npc = npc; 3006 __get_user(env->y, &sf->info.si_regs.y); 3007 for (i=0; i < 8; i++) { 3008 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]); 3009 } 3010 for (i=0; i < 8; i++) { 3011 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]); 3012 } 3013 3014 /* FIXME: implement FPU save/restore: 3015 * __get_user(fpu_save, &sf->fpu_save); 3016 * if (fpu_save) 3017 * err |= restore_fpu_state(env, fpu_save); 3018 */ 3019 3020 /* This is pretty much atomic, no amount locking would prevent 3021 * the races which exist anyways. 3022 */ 3023 __get_user(set.sig[0], &sf->info.si_mask); 3024 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3025 __get_user(set.sig[i], &sf->extramask[i - 1]); 3026 } 3027 3028 target_to_host_sigset_internal(&host_set, &set); 3029 set_sigmask(&host_set); 3030 3031 if (err) { 3032 goto segv_and_exit; 3033 } 3034 unlock_user_struct(sf, sf_addr, 0); 3035 return -TARGET_QEMU_ESIGRETURN; 3036 3037 segv_and_exit: 3038 unlock_user_struct(sf, sf_addr, 0); 3039 force_sig(TARGET_SIGSEGV); 3040 return -TARGET_QEMU_ESIGRETURN; 3041 } 3042 3043 long do_rt_sigreturn(CPUSPARCState *env) 3044 { 3045 trace_user_do_rt_sigreturn(env, 0); 3046 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 3047 return -TARGET_ENOSYS; 3048 } 3049 3050 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) 3051 #define SPARC_MC_TSTATE 0 3052 #define SPARC_MC_PC 1 3053 #define SPARC_MC_NPC 2 3054 #define SPARC_MC_Y 3 3055 #define SPARC_MC_G1 4 3056 #define SPARC_MC_G2 5 3057 #define SPARC_MC_G3 6 3058 #define SPARC_MC_G4 7 3059 #define SPARC_MC_G5 8 3060 #define SPARC_MC_G6 9 3061 #define SPARC_MC_G7 10 3062 #define SPARC_MC_O0 11 3063 #define SPARC_MC_O1 12 3064 #define SPARC_MC_O2 13 3065 #define SPARC_MC_O3 14 3066 #define SPARC_MC_O4 15 3067 #define SPARC_MC_O5 16 3068 #define SPARC_MC_O6 17 3069 #define SPARC_MC_O7 18 3070 #define SPARC_MC_NGREG 19 3071 3072 typedef abi_ulong target_mc_greg_t; 3073 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG]; 3074 3075 struct target_mc_fq { 3076 abi_ulong *mcfq_addr; 3077 uint32_t mcfq_insn; 3078 }; 3079 3080 struct target_mc_fpu { 3081 union { 3082 uint32_t sregs[32]; 3083 uint64_t dregs[32]; 3084 //uint128_t qregs[16]; 3085 } mcfpu_fregs; 3086 abi_ulong mcfpu_fsr; 3087 abi_ulong mcfpu_fprs; 3088 abi_ulong mcfpu_gsr; 3089 struct target_mc_fq *mcfpu_fq; 3090 unsigned char mcfpu_qcnt; 3091 unsigned char mcfpu_qentsz; 3092 unsigned char mcfpu_enab; 3093 }; 3094 typedef struct target_mc_fpu target_mc_fpu_t; 3095 3096 typedef struct { 3097 target_mc_gregset_t mc_gregs; 3098 target_mc_greg_t mc_fp; 3099 target_mc_greg_t mc_i7; 3100 target_mc_fpu_t mc_fpregs; 3101 } target_mcontext_t; 3102 3103 struct target_ucontext { 3104 struct target_ucontext *tuc_link; 3105 abi_ulong tuc_flags; 3106 target_sigset_t tuc_sigmask; 3107 target_mcontext_t tuc_mcontext; 3108 }; 3109 3110 /* A V9 register window */ 3111 struct target_reg_window { 3112 abi_ulong locals[8]; 3113 abi_ulong ins[8]; 3114 }; 3115 3116 #define TARGET_STACK_BIAS 2047 3117 3118 /* {set, get}context() needed for 64-bit SparcLinux userland. */ 3119 void sparc64_set_context(CPUSPARCState *env) 3120 { 3121 abi_ulong ucp_addr; 3122 struct target_ucontext *ucp; 3123 target_mc_gregset_t *grp; 3124 abi_ulong pc, npc, tstate; 3125 abi_ulong fp, i7, w_addr; 3126 unsigned int i; 3127 3128 ucp_addr = env->regwptr[UREG_I0]; 3129 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) { 3130 goto do_sigsegv; 3131 } 3132 grp = &ucp->tuc_mcontext.mc_gregs; 3133 __get_user(pc, &((*grp)[SPARC_MC_PC])); 3134 __get_user(npc, &((*grp)[SPARC_MC_NPC])); 3135 if ((pc | npc) & 3) { 3136 goto do_sigsegv; 3137 } 3138 if (env->regwptr[UREG_I1]) { 3139 target_sigset_t target_set; 3140 sigset_t set; 3141 3142 if (TARGET_NSIG_WORDS == 1) { 3143 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]); 3144 } else { 3145 abi_ulong *src, *dst; 3146 src = ucp->tuc_sigmask.sig; 3147 dst = target_set.sig; 3148 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 3149 __get_user(*dst, src); 3150 } 3151 } 3152 target_to_host_sigset_internal(&set, &target_set); 3153 set_sigmask(&set); 3154 } 3155 env->pc = pc; 3156 env->npc = npc; 3157 __get_user(env->y, &((*grp)[SPARC_MC_Y])); 3158 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE])); 3159 env->asi = (tstate >> 24) & 0xff; 3160 cpu_put_ccr(env, tstate >> 32); 3161 cpu_put_cwp64(env, tstate & 0x1f); 3162 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1])); 3163 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2])); 3164 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3])); 3165 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4])); 3166 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5])); 3167 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6])); 3168 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7])); 3169 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0])); 3170 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1])); 3171 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2])); 3172 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3])); 3173 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4])); 3174 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5])); 3175 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6])); 3176 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7])); 3177 3178 __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); 3179 __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); 3180 3181 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 3182 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 3183 abi_ulong) != 0) { 3184 goto do_sigsegv; 3185 } 3186 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 3187 abi_ulong) != 0) { 3188 goto do_sigsegv; 3189 } 3190 /* FIXME this does not match how the kernel handles the FPU in 3191 * its sparc64_set_context implementation. In particular the FPU 3192 * is only restored if fenab is non-zero in: 3193 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); 3194 */ 3195 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); 3196 { 3197 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 3198 for (i = 0; i < 64; i++, src++) { 3199 if (i & 1) { 3200 __get_user(env->fpr[i/2].l.lower, src); 3201 } else { 3202 __get_user(env->fpr[i/2].l.upper, src); 3203 } 3204 } 3205 } 3206 __get_user(env->fsr, 3207 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); 3208 __get_user(env->gsr, 3209 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); 3210 unlock_user_struct(ucp, ucp_addr, 0); 3211 return; 3212 do_sigsegv: 3213 unlock_user_struct(ucp, ucp_addr, 0); 3214 force_sig(TARGET_SIGSEGV); 3215 } 3216 3217 void sparc64_get_context(CPUSPARCState *env) 3218 { 3219 abi_ulong ucp_addr; 3220 struct target_ucontext *ucp; 3221 target_mc_gregset_t *grp; 3222 target_mcontext_t *mcp; 3223 abi_ulong fp, i7, w_addr; 3224 int err; 3225 unsigned int i; 3226 target_sigset_t target_set; 3227 sigset_t set; 3228 3229 ucp_addr = env->regwptr[UREG_I0]; 3230 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) { 3231 goto do_sigsegv; 3232 } 3233 3234 mcp = &ucp->tuc_mcontext; 3235 grp = &mcp->mc_gregs; 3236 3237 /* Skip over the trap instruction, first. */ 3238 env->pc = env->npc; 3239 env->npc += 4; 3240 3241 /* If we're only reading the signal mask then do_sigprocmask() 3242 * is guaranteed not to fail, which is important because we don't 3243 * have any way to signal a failure or restart this operation since 3244 * this is not a normal syscall. 3245 */ 3246 err = do_sigprocmask(0, NULL, &set); 3247 assert(err == 0); 3248 host_to_target_sigset_internal(&target_set, &set); 3249 if (TARGET_NSIG_WORDS == 1) { 3250 __put_user(target_set.sig[0], 3251 (abi_ulong *)&ucp->tuc_sigmask); 3252 } else { 3253 abi_ulong *src, *dst; 3254 src = target_set.sig; 3255 dst = ucp->tuc_sigmask.sig; 3256 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { 3257 __put_user(*src, dst); 3258 } 3259 if (err) 3260 goto do_sigsegv; 3261 } 3262 3263 /* XXX: tstate must be saved properly */ 3264 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE])); 3265 __put_user(env->pc, &((*grp)[SPARC_MC_PC])); 3266 __put_user(env->npc, &((*grp)[SPARC_MC_NPC])); 3267 __put_user(env->y, &((*grp)[SPARC_MC_Y])); 3268 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1])); 3269 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2])); 3270 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3])); 3271 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4])); 3272 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5])); 3273 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6])); 3274 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7])); 3275 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0])); 3276 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1])); 3277 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2])); 3278 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3])); 3279 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4])); 3280 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5])); 3281 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6])); 3282 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7])); 3283 3284 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; 3285 fp = i7 = 0; 3286 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), 3287 abi_ulong) != 0) { 3288 goto do_sigsegv; 3289 } 3290 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), 3291 abi_ulong) != 0) { 3292 goto do_sigsegv; 3293 } 3294 __put_user(fp, &(mcp->mc_fp)); 3295 __put_user(i7, &(mcp->mc_i7)); 3296 3297 { 3298 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; 3299 for (i = 0; i < 64; i++, dst++) { 3300 if (i & 1) { 3301 __put_user(env->fpr[i/2].l.lower, dst); 3302 } else { 3303 __put_user(env->fpr[i/2].l.upper, dst); 3304 } 3305 } 3306 } 3307 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); 3308 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); 3309 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); 3310 3311 if (err) 3312 goto do_sigsegv; 3313 unlock_user_struct(ucp, ucp_addr, 1); 3314 return; 3315 do_sigsegv: 3316 unlock_user_struct(ucp, ucp_addr, 1); 3317 force_sig(TARGET_SIGSEGV); 3318 } 3319 #endif 3320 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) 3321 3322 # if defined(TARGET_ABI_MIPSO32) 3323 struct target_sigcontext { 3324 uint32_t sc_regmask; /* Unused */ 3325 uint32_t sc_status; 3326 uint64_t sc_pc; 3327 uint64_t sc_regs[32]; 3328 uint64_t sc_fpregs[32]; 3329 uint32_t sc_ownedfp; /* Unused */ 3330 uint32_t sc_fpc_csr; 3331 uint32_t sc_fpc_eir; /* Unused */ 3332 uint32_t sc_used_math; 3333 uint32_t sc_dsp; /* dsp status, was sc_ssflags */ 3334 uint32_t pad0; 3335 uint64_t sc_mdhi; 3336 uint64_t sc_mdlo; 3337 target_ulong sc_hi1; /* Was sc_cause */ 3338 target_ulong sc_lo1; /* Was sc_badvaddr */ 3339 target_ulong sc_hi2; /* Was sc_sigset[4] */ 3340 target_ulong sc_lo2; 3341 target_ulong sc_hi3; 3342 target_ulong sc_lo3; 3343 }; 3344 # else /* N32 || N64 */ 3345 struct target_sigcontext { 3346 uint64_t sc_regs[32]; 3347 uint64_t sc_fpregs[32]; 3348 uint64_t sc_mdhi; 3349 uint64_t sc_hi1; 3350 uint64_t sc_hi2; 3351 uint64_t sc_hi3; 3352 uint64_t sc_mdlo; 3353 uint64_t sc_lo1; 3354 uint64_t sc_lo2; 3355 uint64_t sc_lo3; 3356 uint64_t sc_pc; 3357 uint32_t sc_fpc_csr; 3358 uint32_t sc_used_math; 3359 uint32_t sc_dsp; 3360 uint32_t sc_reserved; 3361 }; 3362 # endif /* O32 */ 3363 3364 struct sigframe { 3365 uint32_t sf_ass[4]; /* argument save space for o32 */ 3366 uint32_t sf_code[2]; /* signal trampoline */ 3367 struct target_sigcontext sf_sc; 3368 target_sigset_t sf_mask; 3369 }; 3370 3371 struct target_ucontext { 3372 target_ulong tuc_flags; 3373 target_ulong tuc_link; 3374 target_stack_t tuc_stack; 3375 target_ulong pad0; 3376 struct target_sigcontext tuc_mcontext; 3377 target_sigset_t tuc_sigmask; 3378 }; 3379 3380 struct target_rt_sigframe { 3381 uint32_t rs_ass[4]; /* argument save space for o32 */ 3382 uint32_t rs_code[2]; /* signal trampoline */ 3383 struct target_siginfo rs_info; 3384 struct target_ucontext rs_uc; 3385 }; 3386 3387 /* Install trampoline to jump back from signal handler */ 3388 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) 3389 { 3390 int err = 0; 3391 3392 /* 3393 * Set up the return code ... 3394 * 3395 * li v0, __NR__foo_sigreturn 3396 * syscall 3397 */ 3398 3399 __put_user(0x24020000 + syscall, tramp + 0); 3400 __put_user(0x0000000c , tramp + 1); 3401 return err; 3402 } 3403 3404 static inline void setup_sigcontext(CPUMIPSState *regs, 3405 struct target_sigcontext *sc) 3406 { 3407 int i; 3408 3409 __put_user(exception_resume_pc(regs), &sc->sc_pc); 3410 regs->hflags &= ~MIPS_HFLAG_BMASK; 3411 3412 __put_user(0, &sc->sc_regs[0]); 3413 for (i = 1; i < 32; ++i) { 3414 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 3415 } 3416 3417 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi); 3418 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo); 3419 3420 /* Rather than checking for dsp existence, always copy. The storage 3421 would just be garbage otherwise. */ 3422 __put_user(regs->active_tc.HI[1], &sc->sc_hi1); 3423 __put_user(regs->active_tc.HI[2], &sc->sc_hi2); 3424 __put_user(regs->active_tc.HI[3], &sc->sc_hi3); 3425 __put_user(regs->active_tc.LO[1], &sc->sc_lo1); 3426 __put_user(regs->active_tc.LO[2], &sc->sc_lo2); 3427 __put_user(regs->active_tc.LO[3], &sc->sc_lo3); 3428 { 3429 uint32_t dsp = cpu_rddsp(0x3ff, regs); 3430 __put_user(dsp, &sc->sc_dsp); 3431 } 3432 3433 __put_user(1, &sc->sc_used_math); 3434 3435 for (i = 0; i < 32; ++i) { 3436 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 3437 } 3438 } 3439 3440 static inline void 3441 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc) 3442 { 3443 int i; 3444 3445 __get_user(regs->CP0_EPC, &sc->sc_pc); 3446 3447 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi); 3448 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo); 3449 3450 for (i = 1; i < 32; ++i) { 3451 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); 3452 } 3453 3454 __get_user(regs->active_tc.HI[1], &sc->sc_hi1); 3455 __get_user(regs->active_tc.HI[2], &sc->sc_hi2); 3456 __get_user(regs->active_tc.HI[3], &sc->sc_hi3); 3457 __get_user(regs->active_tc.LO[1], &sc->sc_lo1); 3458 __get_user(regs->active_tc.LO[2], &sc->sc_lo2); 3459 __get_user(regs->active_tc.LO[3], &sc->sc_lo3); 3460 { 3461 uint32_t dsp; 3462 __get_user(dsp, &sc->sc_dsp); 3463 cpu_wrdsp(dsp, 0x3ff, regs); 3464 } 3465 3466 for (i = 0; i < 32; ++i) { 3467 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]); 3468 } 3469 } 3470 3471 /* 3472 * Determine which stack to use.. 3473 */ 3474 static inline abi_ulong 3475 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size) 3476 { 3477 unsigned long sp; 3478 3479 /* Default to using normal stack */ 3480 sp = regs->active_tc.gpr[29]; 3481 3482 /* 3483 * FPU emulator may have its own trampoline active just 3484 * above the user stack, 16-bytes before the next lowest 3485 * 16 byte boundary. Try to avoid trashing it. 3486 */ 3487 sp -= 32; 3488 3489 /* This is the X/Open sanctioned signal stack switching. */ 3490 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 3491 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3492 } 3493 3494 return (sp - frame_size) & ~7; 3495 } 3496 3497 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env) 3498 { 3499 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { 3500 env->hflags &= ~MIPS_HFLAG_M16; 3501 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT; 3502 env->active_tc.PC &= ~(target_ulong) 1; 3503 } 3504 } 3505 3506 # if defined(TARGET_ABI_MIPSO32) 3507 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ 3508 static void setup_frame(int sig, struct target_sigaction * ka, 3509 target_sigset_t *set, CPUMIPSState *regs) 3510 { 3511 struct sigframe *frame; 3512 abi_ulong frame_addr; 3513 int i; 3514 3515 frame_addr = get_sigframe(ka, regs, sizeof(*frame)); 3516 trace_user_setup_frame(regs, frame_addr); 3517 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3518 goto give_sigsegv; 3519 } 3520 3521 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); 3522 3523 setup_sigcontext(regs, &frame->sf_sc); 3524 3525 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3526 __put_user(set->sig[i], &frame->sf_mask.sig[i]); 3527 } 3528 3529 /* 3530 * Arguments to signal handler: 3531 * 3532 * a0 = signal number 3533 * a1 = 0 (should be cause) 3534 * a2 = pointer to struct sigcontext 3535 * 3536 * $25 and PC point to the signal handler, $29 points to the 3537 * struct sigframe. 3538 */ 3539 regs->active_tc.gpr[ 4] = sig; 3540 regs->active_tc.gpr[ 5] = 0; 3541 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); 3542 regs->active_tc.gpr[29] = frame_addr; 3543 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); 3544 /* The original kernel code sets CP0_EPC to the handler 3545 * since it returns to userland using eret 3546 * we cannot do this here, and we must set PC directly */ 3547 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; 3548 mips_set_hflags_isa_mode_from_pc(regs); 3549 unlock_user_struct(frame, frame_addr, 1); 3550 return; 3551 3552 give_sigsegv: 3553 force_sigsegv(sig); 3554 } 3555 3556 long do_sigreturn(CPUMIPSState *regs) 3557 { 3558 struct sigframe *frame; 3559 abi_ulong frame_addr; 3560 sigset_t blocked; 3561 target_sigset_t target_set; 3562 int i; 3563 3564 frame_addr = regs->active_tc.gpr[29]; 3565 trace_user_do_sigreturn(regs, frame_addr); 3566 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 3567 goto badframe; 3568 3569 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3570 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]); 3571 } 3572 3573 target_to_host_sigset_internal(&blocked, &target_set); 3574 set_sigmask(&blocked); 3575 3576 restore_sigcontext(regs, &frame->sf_sc); 3577 3578 #if 0 3579 /* 3580 * Don't let your children do this ... 3581 */ 3582 __asm__ __volatile__( 3583 "move\t$29, %0\n\t" 3584 "j\tsyscall_exit" 3585 :/* no outputs */ 3586 :"r" (®s)); 3587 /* Unreached */ 3588 #endif 3589 3590 regs->active_tc.PC = regs->CP0_EPC; 3591 mips_set_hflags_isa_mode_from_pc(regs); 3592 /* I am not sure this is right, but it seems to work 3593 * maybe a problem with nested signals ? */ 3594 regs->CP0_EPC = 0; 3595 return -TARGET_QEMU_ESIGRETURN; 3596 3597 badframe: 3598 force_sig(TARGET_SIGSEGV); 3599 return -TARGET_QEMU_ESIGRETURN; 3600 } 3601 # endif /* O32 */ 3602 3603 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3604 target_siginfo_t *info, 3605 target_sigset_t *set, CPUMIPSState *env) 3606 { 3607 struct target_rt_sigframe *frame; 3608 abi_ulong frame_addr; 3609 int i; 3610 3611 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 3612 trace_user_setup_rt_frame(env, frame_addr); 3613 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3614 goto give_sigsegv; 3615 } 3616 3617 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); 3618 3619 tswap_siginfo(&frame->rs_info, info); 3620 3621 __put_user(0, &frame->rs_uc.tuc_flags); 3622 __put_user(0, &frame->rs_uc.tuc_link); 3623 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp); 3624 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size); 3625 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 3626 &frame->rs_uc.tuc_stack.ss_flags); 3627 3628 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3629 3630 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3631 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); 3632 } 3633 3634 /* 3635 * Arguments to signal handler: 3636 * 3637 * a0 = signal number 3638 * a1 = pointer to siginfo_t 3639 * a2 = pointer to ucontext_t 3640 * 3641 * $25 and PC point to the signal handler, $29 points to the 3642 * struct sigframe. 3643 */ 3644 env->active_tc.gpr[ 4] = sig; 3645 env->active_tc.gpr[ 5] = frame_addr 3646 + offsetof(struct target_rt_sigframe, rs_info); 3647 env->active_tc.gpr[ 6] = frame_addr 3648 + offsetof(struct target_rt_sigframe, rs_uc); 3649 env->active_tc.gpr[29] = frame_addr; 3650 env->active_tc.gpr[31] = frame_addr 3651 + offsetof(struct target_rt_sigframe, rs_code); 3652 /* The original kernel code sets CP0_EPC to the handler 3653 * since it returns to userland using eret 3654 * we cannot do this here, and we must set PC directly */ 3655 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; 3656 mips_set_hflags_isa_mode_from_pc(env); 3657 unlock_user_struct(frame, frame_addr, 1); 3658 return; 3659 3660 give_sigsegv: 3661 unlock_user_struct(frame, frame_addr, 1); 3662 force_sigsegv(sig); 3663 } 3664 3665 long do_rt_sigreturn(CPUMIPSState *env) 3666 { 3667 struct target_rt_sigframe *frame; 3668 abi_ulong frame_addr; 3669 sigset_t blocked; 3670 3671 frame_addr = env->active_tc.gpr[29]; 3672 trace_user_do_rt_sigreturn(env, frame_addr); 3673 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3674 goto badframe; 3675 } 3676 3677 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); 3678 set_sigmask(&blocked); 3679 3680 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); 3681 3682 if (do_sigaltstack(frame_addr + 3683 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 3684 0, get_sp_from_cpustate(env)) == -EFAULT) 3685 goto badframe; 3686 3687 env->active_tc.PC = env->CP0_EPC; 3688 mips_set_hflags_isa_mode_from_pc(env); 3689 /* I am not sure this is right, but it seems to work 3690 * maybe a problem with nested signals ? */ 3691 env->CP0_EPC = 0; 3692 return -TARGET_QEMU_ESIGRETURN; 3693 3694 badframe: 3695 force_sig(TARGET_SIGSEGV); 3696 return -TARGET_QEMU_ESIGRETURN; 3697 } 3698 3699 #elif defined(TARGET_SH4) 3700 3701 /* 3702 * code and data structures from linux kernel: 3703 * include/asm-sh/sigcontext.h 3704 * arch/sh/kernel/signal.c 3705 */ 3706 3707 struct target_sigcontext { 3708 target_ulong oldmask; 3709 3710 /* CPU registers */ 3711 target_ulong sc_gregs[16]; 3712 target_ulong sc_pc; 3713 target_ulong sc_pr; 3714 target_ulong sc_sr; 3715 target_ulong sc_gbr; 3716 target_ulong sc_mach; 3717 target_ulong sc_macl; 3718 3719 /* FPU registers */ 3720 target_ulong sc_fpregs[16]; 3721 target_ulong sc_xfpregs[16]; 3722 unsigned int sc_fpscr; 3723 unsigned int sc_fpul; 3724 unsigned int sc_ownedfp; 3725 }; 3726 3727 struct target_sigframe 3728 { 3729 struct target_sigcontext sc; 3730 target_ulong extramask[TARGET_NSIG_WORDS-1]; 3731 uint16_t retcode[3]; 3732 }; 3733 3734 3735 struct target_ucontext { 3736 target_ulong tuc_flags; 3737 struct target_ucontext *tuc_link; 3738 target_stack_t tuc_stack; 3739 struct target_sigcontext tuc_mcontext; 3740 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 3741 }; 3742 3743 struct target_rt_sigframe 3744 { 3745 struct target_siginfo info; 3746 struct target_ucontext uc; 3747 uint16_t retcode[3]; 3748 }; 3749 3750 3751 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ 3752 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */ 3753 3754 static abi_ulong get_sigframe(struct target_sigaction *ka, 3755 unsigned long sp, size_t frame_size) 3756 { 3757 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) { 3758 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 3759 } 3760 3761 return (sp - frame_size) & -8ul; 3762 } 3763 3764 /* Notice when we're in the middle of a gUSA region and reset. 3765 Note that this will only occur for !parallel_cpus, as we will 3766 translate such sequences differently in a parallel context. */ 3767 static void unwind_gusa(CPUSH4State *regs) 3768 { 3769 /* If the stack pointer is sufficiently negative, and we haven't 3770 completed the sequence, then reset to the entry to the region. */ 3771 /* ??? The SH4 kernel checks for and address above 0xC0000000. 3772 However, the page mappings in qemu linux-user aren't as restricted 3773 and we wind up with the normal stack mapped above 0xF0000000. 3774 That said, there is no reason why the kernel should be allowing 3775 a gUSA region that spans 1GB. Use a tighter check here, for what 3776 can actually be enabled by the immediate move. */ 3777 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) { 3778 /* Reset the PC to before the gUSA region, as computed from 3779 R0 = region end, SP = -(region size), plus one more for the 3780 insn that actually initializes SP to the region size. */ 3781 regs->pc = regs->gregs[0] + regs->gregs[15] - 2; 3782 3783 /* Reset the SP to the saved version in R1. */ 3784 regs->gregs[15] = regs->gregs[1]; 3785 } 3786 } 3787 3788 static void setup_sigcontext(struct target_sigcontext *sc, 3789 CPUSH4State *regs, unsigned long mask) 3790 { 3791 int i; 3792 3793 #define COPY(x) __put_user(regs->x, &sc->sc_##x) 3794 COPY(gregs[0]); COPY(gregs[1]); 3795 COPY(gregs[2]); COPY(gregs[3]); 3796 COPY(gregs[4]); COPY(gregs[5]); 3797 COPY(gregs[6]); COPY(gregs[7]); 3798 COPY(gregs[8]); COPY(gregs[9]); 3799 COPY(gregs[10]); COPY(gregs[11]); 3800 COPY(gregs[12]); COPY(gregs[13]); 3801 COPY(gregs[14]); COPY(gregs[15]); 3802 COPY(gbr); COPY(mach); 3803 COPY(macl); COPY(pr); 3804 COPY(sr); COPY(pc); 3805 #undef COPY 3806 3807 for (i=0; i<16; i++) { 3808 __put_user(regs->fregs[i], &sc->sc_fpregs[i]); 3809 } 3810 __put_user(regs->fpscr, &sc->sc_fpscr); 3811 __put_user(regs->fpul, &sc->sc_fpul); 3812 3813 /* non-iBCS2 extensions.. */ 3814 __put_user(mask, &sc->oldmask); 3815 } 3816 3817 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc) 3818 { 3819 int i; 3820 3821 #define COPY(x) __get_user(regs->x, &sc->sc_##x) 3822 COPY(gregs[0]); COPY(gregs[1]); 3823 COPY(gregs[2]); COPY(gregs[3]); 3824 COPY(gregs[4]); COPY(gregs[5]); 3825 COPY(gregs[6]); COPY(gregs[7]); 3826 COPY(gregs[8]); COPY(gregs[9]); 3827 COPY(gregs[10]); COPY(gregs[11]); 3828 COPY(gregs[12]); COPY(gregs[13]); 3829 COPY(gregs[14]); COPY(gregs[15]); 3830 COPY(gbr); COPY(mach); 3831 COPY(macl); COPY(pr); 3832 COPY(sr); COPY(pc); 3833 #undef COPY 3834 3835 for (i=0; i<16; i++) { 3836 __get_user(regs->fregs[i], &sc->sc_fpregs[i]); 3837 } 3838 __get_user(regs->fpscr, &sc->sc_fpscr); 3839 __get_user(regs->fpul, &sc->sc_fpul); 3840 3841 regs->tra = -1; /* disable syscall checks */ 3842 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); 3843 } 3844 3845 static void setup_frame(int sig, struct target_sigaction *ka, 3846 target_sigset_t *set, CPUSH4State *regs) 3847 { 3848 struct target_sigframe *frame; 3849 abi_ulong frame_addr; 3850 int i; 3851 3852 unwind_gusa(regs); 3853 3854 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3855 trace_user_setup_frame(regs, frame_addr); 3856 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3857 goto give_sigsegv; 3858 } 3859 3860 setup_sigcontext(&frame->sc, regs, set->sig[0]); 3861 3862 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) { 3863 __put_user(set->sig[i + 1], &frame->extramask[i]); 3864 } 3865 3866 /* Set up to return from userspace. If provided, use a stub 3867 already in userspace. */ 3868 if (ka->sa_flags & TARGET_SA_RESTORER) { 3869 regs->pr = (unsigned long) ka->sa_restorer; 3870 } else { 3871 /* Generate return code (system call to sigreturn) */ 3872 abi_ulong retcode_addr = frame_addr + 3873 offsetof(struct target_sigframe, retcode); 3874 __put_user(MOVW(2), &frame->retcode[0]); 3875 __put_user(TRAP_NOARG, &frame->retcode[1]); 3876 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); 3877 regs->pr = (unsigned long) retcode_addr; 3878 } 3879 3880 /* Set up registers for signal handler */ 3881 regs->gregs[15] = frame_addr; 3882 regs->gregs[4] = sig; /* Arg for signal handler */ 3883 regs->gregs[5] = 0; 3884 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc); 3885 regs->pc = (unsigned long) ka->_sa_handler; 3886 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); 3887 3888 unlock_user_struct(frame, frame_addr, 1); 3889 return; 3890 3891 give_sigsegv: 3892 unlock_user_struct(frame, frame_addr, 1); 3893 force_sigsegv(sig); 3894 } 3895 3896 static void setup_rt_frame(int sig, struct target_sigaction *ka, 3897 target_siginfo_t *info, 3898 target_sigset_t *set, CPUSH4State *regs) 3899 { 3900 struct target_rt_sigframe *frame; 3901 abi_ulong frame_addr; 3902 int i; 3903 3904 unwind_gusa(regs); 3905 3906 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); 3907 trace_user_setup_rt_frame(regs, frame_addr); 3908 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 3909 goto give_sigsegv; 3910 } 3911 3912 tswap_siginfo(&frame->info, info); 3913 3914 /* Create the ucontext. */ 3915 __put_user(0, &frame->uc.tuc_flags); 3916 __put_user(0, (unsigned long *)&frame->uc.tuc_link); 3917 __put_user((unsigned long)target_sigaltstack_used.ss_sp, 3918 &frame->uc.tuc_stack.ss_sp); 3919 __put_user(sas_ss_flags(regs->gregs[15]), 3920 &frame->uc.tuc_stack.ss_flags); 3921 __put_user(target_sigaltstack_used.ss_size, 3922 &frame->uc.tuc_stack.ss_size); 3923 setup_sigcontext(&frame->uc.tuc_mcontext, 3924 regs, set->sig[0]); 3925 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 3926 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 3927 } 3928 3929 /* Set up to return from userspace. If provided, use a stub 3930 already in userspace. */ 3931 if (ka->sa_flags & TARGET_SA_RESTORER) { 3932 regs->pr = (unsigned long) ka->sa_restorer; 3933 } else { 3934 /* Generate return code (system call to sigreturn) */ 3935 abi_ulong retcode_addr = frame_addr + 3936 offsetof(struct target_rt_sigframe, retcode); 3937 __put_user(MOVW(2), &frame->retcode[0]); 3938 __put_user(TRAP_NOARG, &frame->retcode[1]); 3939 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); 3940 regs->pr = (unsigned long) retcode_addr; 3941 } 3942 3943 /* Set up registers for signal handler */ 3944 regs->gregs[15] = frame_addr; 3945 regs->gregs[4] = sig; /* Arg for signal handler */ 3946 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); 3947 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); 3948 regs->pc = (unsigned long) ka->_sa_handler; 3949 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK); 3950 3951 unlock_user_struct(frame, frame_addr, 1); 3952 return; 3953 3954 give_sigsegv: 3955 unlock_user_struct(frame, frame_addr, 1); 3956 force_sigsegv(sig); 3957 } 3958 3959 long do_sigreturn(CPUSH4State *regs) 3960 { 3961 struct target_sigframe *frame; 3962 abi_ulong frame_addr; 3963 sigset_t blocked; 3964 target_sigset_t target_set; 3965 int i; 3966 int err = 0; 3967 3968 frame_addr = regs->gregs[15]; 3969 trace_user_do_sigreturn(regs, frame_addr); 3970 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 3971 goto badframe; 3972 } 3973 3974 __get_user(target_set.sig[0], &frame->sc.oldmask); 3975 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 3976 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 3977 } 3978 3979 if (err) 3980 goto badframe; 3981 3982 target_to_host_sigset_internal(&blocked, &target_set); 3983 set_sigmask(&blocked); 3984 3985 restore_sigcontext(regs, &frame->sc); 3986 3987 unlock_user_struct(frame, frame_addr, 0); 3988 return -TARGET_QEMU_ESIGRETURN; 3989 3990 badframe: 3991 unlock_user_struct(frame, frame_addr, 0); 3992 force_sig(TARGET_SIGSEGV); 3993 return -TARGET_QEMU_ESIGRETURN; 3994 } 3995 3996 long do_rt_sigreturn(CPUSH4State *regs) 3997 { 3998 struct target_rt_sigframe *frame; 3999 abi_ulong frame_addr; 4000 sigset_t blocked; 4001 4002 frame_addr = regs->gregs[15]; 4003 trace_user_do_rt_sigreturn(regs, frame_addr); 4004 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4005 goto badframe; 4006 } 4007 4008 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); 4009 set_sigmask(&blocked); 4010 4011 restore_sigcontext(regs, &frame->uc.tuc_mcontext); 4012 4013 if (do_sigaltstack(frame_addr + 4014 offsetof(struct target_rt_sigframe, uc.tuc_stack), 4015 0, get_sp_from_cpustate(regs)) == -EFAULT) { 4016 goto badframe; 4017 } 4018 4019 unlock_user_struct(frame, frame_addr, 0); 4020 return -TARGET_QEMU_ESIGRETURN; 4021 4022 badframe: 4023 unlock_user_struct(frame, frame_addr, 0); 4024 force_sig(TARGET_SIGSEGV); 4025 return -TARGET_QEMU_ESIGRETURN; 4026 } 4027 #elif defined(TARGET_MICROBLAZE) 4028 4029 struct target_sigcontext { 4030 struct target_pt_regs regs; /* needs to be first */ 4031 uint32_t oldmask; 4032 }; 4033 4034 struct target_stack_t { 4035 abi_ulong ss_sp; 4036 int ss_flags; 4037 unsigned int ss_size; 4038 }; 4039 4040 struct target_ucontext { 4041 abi_ulong tuc_flags; 4042 abi_ulong tuc_link; 4043 struct target_stack_t tuc_stack; 4044 struct target_sigcontext tuc_mcontext; 4045 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1]; 4046 }; 4047 4048 /* Signal frames. */ 4049 struct target_signal_frame { 4050 struct target_ucontext uc; 4051 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 4052 uint32_t tramp[2]; 4053 }; 4054 4055 struct rt_signal_frame { 4056 siginfo_t info; 4057 ucontext_t uc; 4058 uint32_t tramp[2]; 4059 }; 4060 4061 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 4062 { 4063 __put_user(env->regs[0], &sc->regs.r0); 4064 __put_user(env->regs[1], &sc->regs.r1); 4065 __put_user(env->regs[2], &sc->regs.r2); 4066 __put_user(env->regs[3], &sc->regs.r3); 4067 __put_user(env->regs[4], &sc->regs.r4); 4068 __put_user(env->regs[5], &sc->regs.r5); 4069 __put_user(env->regs[6], &sc->regs.r6); 4070 __put_user(env->regs[7], &sc->regs.r7); 4071 __put_user(env->regs[8], &sc->regs.r8); 4072 __put_user(env->regs[9], &sc->regs.r9); 4073 __put_user(env->regs[10], &sc->regs.r10); 4074 __put_user(env->regs[11], &sc->regs.r11); 4075 __put_user(env->regs[12], &sc->regs.r12); 4076 __put_user(env->regs[13], &sc->regs.r13); 4077 __put_user(env->regs[14], &sc->regs.r14); 4078 __put_user(env->regs[15], &sc->regs.r15); 4079 __put_user(env->regs[16], &sc->regs.r16); 4080 __put_user(env->regs[17], &sc->regs.r17); 4081 __put_user(env->regs[18], &sc->regs.r18); 4082 __put_user(env->regs[19], &sc->regs.r19); 4083 __put_user(env->regs[20], &sc->regs.r20); 4084 __put_user(env->regs[21], &sc->regs.r21); 4085 __put_user(env->regs[22], &sc->regs.r22); 4086 __put_user(env->regs[23], &sc->regs.r23); 4087 __put_user(env->regs[24], &sc->regs.r24); 4088 __put_user(env->regs[25], &sc->regs.r25); 4089 __put_user(env->regs[26], &sc->regs.r26); 4090 __put_user(env->regs[27], &sc->regs.r27); 4091 __put_user(env->regs[28], &sc->regs.r28); 4092 __put_user(env->regs[29], &sc->regs.r29); 4093 __put_user(env->regs[30], &sc->regs.r30); 4094 __put_user(env->regs[31], &sc->regs.r31); 4095 __put_user(env->sregs[SR_PC], &sc->regs.pc); 4096 } 4097 4098 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) 4099 { 4100 __get_user(env->regs[0], &sc->regs.r0); 4101 __get_user(env->regs[1], &sc->regs.r1); 4102 __get_user(env->regs[2], &sc->regs.r2); 4103 __get_user(env->regs[3], &sc->regs.r3); 4104 __get_user(env->regs[4], &sc->regs.r4); 4105 __get_user(env->regs[5], &sc->regs.r5); 4106 __get_user(env->regs[6], &sc->regs.r6); 4107 __get_user(env->regs[7], &sc->regs.r7); 4108 __get_user(env->regs[8], &sc->regs.r8); 4109 __get_user(env->regs[9], &sc->regs.r9); 4110 __get_user(env->regs[10], &sc->regs.r10); 4111 __get_user(env->regs[11], &sc->regs.r11); 4112 __get_user(env->regs[12], &sc->regs.r12); 4113 __get_user(env->regs[13], &sc->regs.r13); 4114 __get_user(env->regs[14], &sc->regs.r14); 4115 __get_user(env->regs[15], &sc->regs.r15); 4116 __get_user(env->regs[16], &sc->regs.r16); 4117 __get_user(env->regs[17], &sc->regs.r17); 4118 __get_user(env->regs[18], &sc->regs.r18); 4119 __get_user(env->regs[19], &sc->regs.r19); 4120 __get_user(env->regs[20], &sc->regs.r20); 4121 __get_user(env->regs[21], &sc->regs.r21); 4122 __get_user(env->regs[22], &sc->regs.r22); 4123 __get_user(env->regs[23], &sc->regs.r23); 4124 __get_user(env->regs[24], &sc->regs.r24); 4125 __get_user(env->regs[25], &sc->regs.r25); 4126 __get_user(env->regs[26], &sc->regs.r26); 4127 __get_user(env->regs[27], &sc->regs.r27); 4128 __get_user(env->regs[28], &sc->regs.r28); 4129 __get_user(env->regs[29], &sc->regs.r29); 4130 __get_user(env->regs[30], &sc->regs.r30); 4131 __get_user(env->regs[31], &sc->regs.r31); 4132 __get_user(env->sregs[SR_PC], &sc->regs.pc); 4133 } 4134 4135 static abi_ulong get_sigframe(struct target_sigaction *ka, 4136 CPUMBState *env, int frame_size) 4137 { 4138 abi_ulong sp = env->regs[1]; 4139 4140 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) { 4141 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4142 } 4143 4144 return ((sp - frame_size) & -8UL); 4145 } 4146 4147 static void setup_frame(int sig, struct target_sigaction *ka, 4148 target_sigset_t *set, CPUMBState *env) 4149 { 4150 struct target_signal_frame *frame; 4151 abi_ulong frame_addr; 4152 int i; 4153 4154 frame_addr = get_sigframe(ka, env, sizeof *frame); 4155 trace_user_setup_frame(env, frame_addr); 4156 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 4157 goto badframe; 4158 4159 /* Save the mask. */ 4160 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); 4161 4162 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 4163 __put_user(set->sig[i], &frame->extramask[i - 1]); 4164 } 4165 4166 setup_sigcontext(&frame->uc.tuc_mcontext, env); 4167 4168 /* Set up to return from userspace. If provided, use a stub 4169 already in userspace. */ 4170 /* minus 8 is offset to cater for "rtsd r15,8" offset */ 4171 if (ka->sa_flags & TARGET_SA_RESTORER) { 4172 env->regs[15] = ((unsigned long)ka->sa_restorer)-8; 4173 } else { 4174 uint32_t t; 4175 /* Note, these encodings are _big endian_! */ 4176 /* addi r12, r0, __NR_sigreturn */ 4177 t = 0x31800000UL | TARGET_NR_sigreturn; 4178 __put_user(t, frame->tramp + 0); 4179 /* brki r14, 0x8 */ 4180 t = 0xb9cc0008UL; 4181 __put_user(t, frame->tramp + 1); 4182 4183 /* Return from sighandler will jump to the tramp. 4184 Negative 8 offset because return is rtsd r15, 8 */ 4185 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp) 4186 - 8; 4187 } 4188 4189 /* Set up registers for signal handler */ 4190 env->regs[1] = frame_addr; 4191 /* Signal handler args: */ 4192 env->regs[5] = sig; /* Arg 0: signum */ 4193 env->regs[6] = 0; 4194 /* arg 1: sigcontext */ 4195 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); 4196 4197 /* Offset of 4 to handle microblaze rtid r14, 0 */ 4198 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; 4199 4200 unlock_user_struct(frame, frame_addr, 1); 4201 return; 4202 badframe: 4203 force_sigsegv(sig); 4204 } 4205 4206 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4207 target_siginfo_t *info, 4208 target_sigset_t *set, CPUMBState *env) 4209 { 4210 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n"); 4211 } 4212 4213 long do_sigreturn(CPUMBState *env) 4214 { 4215 struct target_signal_frame *frame; 4216 abi_ulong frame_addr; 4217 target_sigset_t target_set; 4218 sigset_t set; 4219 int i; 4220 4221 frame_addr = env->regs[R_SP]; 4222 trace_user_do_sigreturn(env, frame_addr); 4223 /* Make sure the guest isn't playing games. */ 4224 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 4225 goto badframe; 4226 4227 /* Restore blocked signals */ 4228 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask); 4229 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 4230 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 4231 } 4232 target_to_host_sigset_internal(&set, &target_set); 4233 set_sigmask(&set); 4234 4235 restore_sigcontext(&frame->uc.tuc_mcontext, env); 4236 /* We got here through a sigreturn syscall, our path back is via an 4237 rtb insn so setup r14 for that. */ 4238 env->regs[14] = env->sregs[SR_PC]; 4239 4240 unlock_user_struct(frame, frame_addr, 0); 4241 return -TARGET_QEMU_ESIGRETURN; 4242 badframe: 4243 force_sig(TARGET_SIGSEGV); 4244 return -TARGET_QEMU_ESIGRETURN; 4245 } 4246 4247 long do_rt_sigreturn(CPUMBState *env) 4248 { 4249 trace_user_do_rt_sigreturn(env, 0); 4250 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n"); 4251 return -TARGET_ENOSYS; 4252 } 4253 4254 #elif defined(TARGET_CRIS) 4255 4256 struct target_sigcontext { 4257 struct target_pt_regs regs; /* needs to be first */ 4258 uint32_t oldmask; 4259 uint32_t usp; /* usp before stacking this gunk on it */ 4260 }; 4261 4262 /* Signal frames. */ 4263 struct target_signal_frame { 4264 struct target_sigcontext sc; 4265 uint32_t extramask[TARGET_NSIG_WORDS - 1]; 4266 uint16_t retcode[4]; /* Trampoline code. */ 4267 }; 4268 4269 struct rt_signal_frame { 4270 siginfo_t *pinfo; 4271 void *puc; 4272 siginfo_t info; 4273 ucontext_t uc; 4274 uint16_t retcode[4]; /* Trampoline code. */ 4275 }; 4276 4277 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 4278 { 4279 __put_user(env->regs[0], &sc->regs.r0); 4280 __put_user(env->regs[1], &sc->regs.r1); 4281 __put_user(env->regs[2], &sc->regs.r2); 4282 __put_user(env->regs[3], &sc->regs.r3); 4283 __put_user(env->regs[4], &sc->regs.r4); 4284 __put_user(env->regs[5], &sc->regs.r5); 4285 __put_user(env->regs[6], &sc->regs.r6); 4286 __put_user(env->regs[7], &sc->regs.r7); 4287 __put_user(env->regs[8], &sc->regs.r8); 4288 __put_user(env->regs[9], &sc->regs.r9); 4289 __put_user(env->regs[10], &sc->regs.r10); 4290 __put_user(env->regs[11], &sc->regs.r11); 4291 __put_user(env->regs[12], &sc->regs.r12); 4292 __put_user(env->regs[13], &sc->regs.r13); 4293 __put_user(env->regs[14], &sc->usp); 4294 __put_user(env->regs[15], &sc->regs.acr); 4295 __put_user(env->pregs[PR_MOF], &sc->regs.mof); 4296 __put_user(env->pregs[PR_SRP], &sc->regs.srp); 4297 __put_user(env->pc, &sc->regs.erp); 4298 } 4299 4300 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env) 4301 { 4302 __get_user(env->regs[0], &sc->regs.r0); 4303 __get_user(env->regs[1], &sc->regs.r1); 4304 __get_user(env->regs[2], &sc->regs.r2); 4305 __get_user(env->regs[3], &sc->regs.r3); 4306 __get_user(env->regs[4], &sc->regs.r4); 4307 __get_user(env->regs[5], &sc->regs.r5); 4308 __get_user(env->regs[6], &sc->regs.r6); 4309 __get_user(env->regs[7], &sc->regs.r7); 4310 __get_user(env->regs[8], &sc->regs.r8); 4311 __get_user(env->regs[9], &sc->regs.r9); 4312 __get_user(env->regs[10], &sc->regs.r10); 4313 __get_user(env->regs[11], &sc->regs.r11); 4314 __get_user(env->regs[12], &sc->regs.r12); 4315 __get_user(env->regs[13], &sc->regs.r13); 4316 __get_user(env->regs[14], &sc->usp); 4317 __get_user(env->regs[15], &sc->regs.acr); 4318 __get_user(env->pregs[PR_MOF], &sc->regs.mof); 4319 __get_user(env->pregs[PR_SRP], &sc->regs.srp); 4320 __get_user(env->pc, &sc->regs.erp); 4321 } 4322 4323 static abi_ulong get_sigframe(CPUCRISState *env, int framesize) 4324 { 4325 abi_ulong sp; 4326 /* Align the stack downwards to 4. */ 4327 sp = (env->regs[R_SP] & ~3); 4328 return sp - framesize; 4329 } 4330 4331 static void setup_frame(int sig, struct target_sigaction *ka, 4332 target_sigset_t *set, CPUCRISState *env) 4333 { 4334 struct target_signal_frame *frame; 4335 abi_ulong frame_addr; 4336 int i; 4337 4338 frame_addr = get_sigframe(env, sizeof *frame); 4339 trace_user_setup_frame(env, frame_addr); 4340 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) 4341 goto badframe; 4342 4343 /* 4344 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't 4345 * use this trampoline anymore but it sets it up for GDB. 4346 * In QEMU, using the trampoline simplifies things a bit so we use it. 4347 * 4348 * This is movu.w __NR_sigreturn, r9; break 13; 4349 */ 4350 __put_user(0x9c5f, frame->retcode+0); 4351 __put_user(TARGET_NR_sigreturn, 4352 frame->retcode + 1); 4353 __put_user(0xe93d, frame->retcode + 2); 4354 4355 /* Save the mask. */ 4356 __put_user(set->sig[0], &frame->sc.oldmask); 4357 4358 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 4359 __put_user(set->sig[i], &frame->extramask[i - 1]); 4360 } 4361 4362 setup_sigcontext(&frame->sc, env); 4363 4364 /* Move the stack and setup the arguments for the handler. */ 4365 env->regs[R_SP] = frame_addr; 4366 env->regs[10] = sig; 4367 env->pc = (unsigned long) ka->_sa_handler; 4368 /* Link SRP so the guest returns through the trampoline. */ 4369 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); 4370 4371 unlock_user_struct(frame, frame_addr, 1); 4372 return; 4373 badframe: 4374 force_sigsegv(sig); 4375 } 4376 4377 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4378 target_siginfo_t *info, 4379 target_sigset_t *set, CPUCRISState *env) 4380 { 4381 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n"); 4382 } 4383 4384 long do_sigreturn(CPUCRISState *env) 4385 { 4386 struct target_signal_frame *frame; 4387 abi_ulong frame_addr; 4388 target_sigset_t target_set; 4389 sigset_t set; 4390 int i; 4391 4392 frame_addr = env->regs[R_SP]; 4393 trace_user_do_sigreturn(env, frame_addr); 4394 /* Make sure the guest isn't playing games. */ 4395 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) { 4396 goto badframe; 4397 } 4398 4399 /* Restore blocked signals */ 4400 __get_user(target_set.sig[0], &frame->sc.oldmask); 4401 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 4402 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 4403 } 4404 target_to_host_sigset_internal(&set, &target_set); 4405 set_sigmask(&set); 4406 4407 restore_sigcontext(&frame->sc, env); 4408 unlock_user_struct(frame, frame_addr, 0); 4409 return -TARGET_QEMU_ESIGRETURN; 4410 badframe: 4411 force_sig(TARGET_SIGSEGV); 4412 return -TARGET_QEMU_ESIGRETURN; 4413 } 4414 4415 long do_rt_sigreturn(CPUCRISState *env) 4416 { 4417 trace_user_do_rt_sigreturn(env, 0); 4418 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n"); 4419 return -TARGET_ENOSYS; 4420 } 4421 4422 #elif defined(TARGET_NIOS2) 4423 4424 #define MCONTEXT_VERSION 2 4425 4426 struct target_sigcontext { 4427 int version; 4428 unsigned long gregs[32]; 4429 }; 4430 4431 struct target_ucontext { 4432 abi_ulong tuc_flags; 4433 abi_ulong tuc_link; 4434 target_stack_t tuc_stack; 4435 struct target_sigcontext tuc_mcontext; 4436 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4437 }; 4438 4439 struct target_rt_sigframe { 4440 struct target_siginfo info; 4441 struct target_ucontext uc; 4442 }; 4443 4444 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka) 4445 { 4446 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) { 4447 #ifdef CONFIG_STACK_GROWSUP 4448 return target_sigaltstack_used.ss_sp; 4449 #else 4450 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4451 #endif 4452 } 4453 return sp; 4454 } 4455 4456 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) 4457 { 4458 unsigned long *gregs = uc->tuc_mcontext.gregs; 4459 4460 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version); 4461 __put_user(env->regs[1], &gregs[0]); 4462 __put_user(env->regs[2], &gregs[1]); 4463 __put_user(env->regs[3], &gregs[2]); 4464 __put_user(env->regs[4], &gregs[3]); 4465 __put_user(env->regs[5], &gregs[4]); 4466 __put_user(env->regs[6], &gregs[5]); 4467 __put_user(env->regs[7], &gregs[6]); 4468 __put_user(env->regs[8], &gregs[7]); 4469 __put_user(env->regs[9], &gregs[8]); 4470 __put_user(env->regs[10], &gregs[9]); 4471 __put_user(env->regs[11], &gregs[10]); 4472 __put_user(env->regs[12], &gregs[11]); 4473 __put_user(env->regs[13], &gregs[12]); 4474 __put_user(env->regs[14], &gregs[13]); 4475 __put_user(env->regs[15], &gregs[14]); 4476 __put_user(env->regs[16], &gregs[15]); 4477 __put_user(env->regs[17], &gregs[16]); 4478 __put_user(env->regs[18], &gregs[17]); 4479 __put_user(env->regs[19], &gregs[18]); 4480 __put_user(env->regs[20], &gregs[19]); 4481 __put_user(env->regs[21], &gregs[20]); 4482 __put_user(env->regs[22], &gregs[21]); 4483 __put_user(env->regs[23], &gregs[22]); 4484 __put_user(env->regs[R_RA], &gregs[23]); 4485 __put_user(env->regs[R_FP], &gregs[24]); 4486 __put_user(env->regs[R_GP], &gregs[25]); 4487 __put_user(env->regs[R_EA], &gregs[27]); 4488 __put_user(env->regs[R_SP], &gregs[28]); 4489 4490 return 0; 4491 } 4492 4493 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc, 4494 int *pr2) 4495 { 4496 int temp; 4497 abi_ulong off, frame_addr = env->regs[R_SP]; 4498 unsigned long *gregs = uc->tuc_mcontext.gregs; 4499 int err; 4500 4501 /* Always make any pending restarted system calls return -EINTR */ 4502 /* current->restart_block.fn = do_no_restart_syscall; */ 4503 4504 __get_user(temp, &uc->tuc_mcontext.version); 4505 if (temp != MCONTEXT_VERSION) { 4506 return 1; 4507 } 4508 4509 /* restore passed registers */ 4510 __get_user(env->regs[1], &gregs[0]); 4511 __get_user(env->regs[2], &gregs[1]); 4512 __get_user(env->regs[3], &gregs[2]); 4513 __get_user(env->regs[4], &gregs[3]); 4514 __get_user(env->regs[5], &gregs[4]); 4515 __get_user(env->regs[6], &gregs[5]); 4516 __get_user(env->regs[7], &gregs[6]); 4517 __get_user(env->regs[8], &gregs[7]); 4518 __get_user(env->regs[9], &gregs[8]); 4519 __get_user(env->regs[10], &gregs[9]); 4520 __get_user(env->regs[11], &gregs[10]); 4521 __get_user(env->regs[12], &gregs[11]); 4522 __get_user(env->regs[13], &gregs[12]); 4523 __get_user(env->regs[14], &gregs[13]); 4524 __get_user(env->regs[15], &gregs[14]); 4525 __get_user(env->regs[16], &gregs[15]); 4526 __get_user(env->regs[17], &gregs[16]); 4527 __get_user(env->regs[18], &gregs[17]); 4528 __get_user(env->regs[19], &gregs[18]); 4529 __get_user(env->regs[20], &gregs[19]); 4530 __get_user(env->regs[21], &gregs[20]); 4531 __get_user(env->regs[22], &gregs[21]); 4532 __get_user(env->regs[23], &gregs[22]); 4533 /* gregs[23] is handled below */ 4534 /* Verify, should this be settable */ 4535 __get_user(env->regs[R_FP], &gregs[24]); 4536 /* Verify, should this be settable */ 4537 __get_user(env->regs[R_GP], &gregs[25]); 4538 /* Not really necessary no user settable bits */ 4539 __get_user(temp, &gregs[26]); 4540 __get_user(env->regs[R_EA], &gregs[27]); 4541 4542 __get_user(env->regs[R_RA], &gregs[23]); 4543 __get_user(env->regs[R_SP], &gregs[28]); 4544 4545 off = offsetof(struct target_rt_sigframe, uc.tuc_stack); 4546 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env)); 4547 if (err == -EFAULT) { 4548 return 1; 4549 } 4550 4551 *pr2 = env->regs[2]; 4552 return 0; 4553 } 4554 4555 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env, 4556 size_t frame_size) 4557 { 4558 unsigned long usp; 4559 4560 /* Default to using normal stack. */ 4561 usp = env->regs[R_SP]; 4562 4563 /* This is the X/Open sanctioned signal stack switching. */ 4564 usp = sigsp(usp, ka); 4565 4566 /* Verify, is it 32 or 64 bit aligned */ 4567 return (void *)((usp - frame_size) & -8UL); 4568 } 4569 4570 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4571 target_siginfo_t *info, 4572 target_sigset_t *set, 4573 CPUNios2State *env) 4574 { 4575 struct target_rt_sigframe *frame; 4576 int i, err = 0; 4577 4578 frame = get_sigframe(ka, env, sizeof(*frame)); 4579 4580 if (ka->sa_flags & SA_SIGINFO) { 4581 tswap_siginfo(&frame->info, info); 4582 } 4583 4584 /* Create the ucontext. */ 4585 __put_user(0, &frame->uc.tuc_flags); 4586 __put_user(0, &frame->uc.tuc_link); 4587 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 4588 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags); 4589 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 4590 err |= rt_setup_ucontext(&frame->uc, env); 4591 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 4592 __put_user((abi_ulong)set->sig[i], 4593 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 4594 } 4595 4596 if (err) { 4597 goto give_sigsegv; 4598 } 4599 4600 /* Set up to return from userspace; jump to fixed address sigreturn 4601 trampoline on kuser page. */ 4602 env->regs[R_RA] = (unsigned long) (0x1044); 4603 4604 /* Set up registers for signal handler */ 4605 env->regs[R_SP] = (unsigned long) frame; 4606 env->regs[4] = (unsigned long) sig; 4607 env->regs[5] = (unsigned long) &frame->info; 4608 env->regs[6] = (unsigned long) &frame->uc; 4609 env->regs[R_EA] = (unsigned long) ka->_sa_handler; 4610 return; 4611 4612 give_sigsegv: 4613 if (sig == TARGET_SIGSEGV) { 4614 ka->_sa_handler = TARGET_SIG_DFL; 4615 } 4616 force_sigsegv(sig); 4617 return; 4618 } 4619 4620 long do_sigreturn(CPUNios2State *env) 4621 { 4622 trace_user_do_sigreturn(env, 0); 4623 fprintf(stderr, "do_sigreturn: not implemented\n"); 4624 return -TARGET_ENOSYS; 4625 } 4626 4627 long do_rt_sigreturn(CPUNios2State *env) 4628 { 4629 /* Verify, can we follow the stack back */ 4630 abi_ulong frame_addr = env->regs[R_SP]; 4631 struct target_rt_sigframe *frame; 4632 sigset_t set; 4633 int rval; 4634 4635 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 4636 goto badframe; 4637 } 4638 4639 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 4640 do_sigprocmask(SIG_SETMASK, &set, NULL); 4641 4642 if (rt_restore_ucontext(env, &frame->uc, &rval)) { 4643 goto badframe; 4644 } 4645 4646 unlock_user_struct(frame, frame_addr, 0); 4647 return rval; 4648 4649 badframe: 4650 unlock_user_struct(frame, frame_addr, 0); 4651 force_sig(TARGET_SIGSEGV); 4652 return 0; 4653 } 4654 /* TARGET_NIOS2 */ 4655 4656 #elif defined(TARGET_OPENRISC) 4657 4658 struct target_sigcontext { 4659 struct target_pt_regs regs; 4660 abi_ulong oldmask; 4661 abi_ulong usp; 4662 }; 4663 4664 struct target_ucontext { 4665 abi_ulong tuc_flags; 4666 abi_ulong tuc_link; 4667 target_stack_t tuc_stack; 4668 struct target_sigcontext tuc_mcontext; 4669 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4670 }; 4671 4672 struct target_rt_sigframe { 4673 abi_ulong pinfo; 4674 uint64_t puc; 4675 struct target_siginfo info; 4676 struct target_sigcontext sc; 4677 struct target_ucontext uc; 4678 unsigned char retcode[16]; /* trampoline code */ 4679 }; 4680 4681 /* This is the asm-generic/ucontext.h version */ 4682 #if 0 4683 static int restore_sigcontext(CPUOpenRISCState *regs, 4684 struct target_sigcontext *sc) 4685 { 4686 unsigned int err = 0; 4687 unsigned long old_usp; 4688 4689 /* Alwys make any pending restarted system call return -EINTR */ 4690 current_thread_info()->restart_block.fn = do_no_restart_syscall; 4691 4692 /* restore the regs from &sc->regs (same as sc, since regs is first) 4693 * (sc is already checked for VERIFY_READ since the sigframe was 4694 * checked in sys_sigreturn previously) 4695 */ 4696 4697 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) { 4698 goto badframe; 4699 } 4700 4701 /* make sure the U-flag is set so user-mode cannot fool us */ 4702 4703 regs->sr &= ~SR_SM; 4704 4705 /* restore the old USP as it was before we stacked the sc etc. 4706 * (we cannot just pop the sigcontext since we aligned the sp and 4707 * stuff after pushing it) 4708 */ 4709 4710 __get_user(old_usp, &sc->usp); 4711 phx_signal("old_usp 0x%lx", old_usp); 4712 4713 __PHX__ REALLY /* ??? */ 4714 wrusp(old_usp); 4715 regs->gpr[1] = old_usp; 4716 4717 /* TODO: the other ports use regs->orig_XX to disable syscall checks 4718 * after this completes, but we don't use that mechanism. maybe we can 4719 * use it now ? 4720 */ 4721 4722 return err; 4723 4724 badframe: 4725 return 1; 4726 } 4727 #endif 4728 4729 /* Set up a signal frame. */ 4730 4731 static void setup_sigcontext(struct target_sigcontext *sc, 4732 CPUOpenRISCState *regs, 4733 unsigned long mask) 4734 { 4735 unsigned long usp = cpu_get_gpr(regs, 1); 4736 4737 /* copy the regs. they are first in sc so we can use sc directly */ 4738 4739 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/ 4740 4741 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of 4742 the signal handler. The frametype will be restored to its previous 4743 value in restore_sigcontext. */ 4744 /*regs->frametype = CRIS_FRAME_NORMAL;*/ 4745 4746 /* then some other stuff */ 4747 __put_user(mask, &sc->oldmask); 4748 __put_user(usp, &sc->usp); 4749 } 4750 4751 static inline unsigned long align_sigframe(unsigned long sp) 4752 { 4753 return sp & ~3UL; 4754 } 4755 4756 static inline abi_ulong get_sigframe(struct target_sigaction *ka, 4757 CPUOpenRISCState *regs, 4758 size_t frame_size) 4759 { 4760 unsigned long sp = cpu_get_gpr(regs, 1); 4761 int onsigstack = on_sig_stack(sp); 4762 4763 /* redzone */ 4764 /* This is the X/Open sanctioned signal stack switching. */ 4765 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 4766 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 4767 } 4768 4769 sp = align_sigframe(sp - frame_size); 4770 4771 /* 4772 * If we are on the alternate signal stack and would overflow it, don't. 4773 * Return an always-bogus address instead so we will die with SIGSEGV. 4774 */ 4775 4776 if (onsigstack && !likely(on_sig_stack(sp))) { 4777 return -1L; 4778 } 4779 4780 return sp; 4781 } 4782 4783 static void setup_rt_frame(int sig, struct target_sigaction *ka, 4784 target_siginfo_t *info, 4785 target_sigset_t *set, CPUOpenRISCState *env) 4786 { 4787 int err = 0; 4788 abi_ulong frame_addr; 4789 unsigned long return_ip; 4790 struct target_rt_sigframe *frame; 4791 abi_ulong info_addr, uc_addr; 4792 4793 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4794 trace_user_setup_rt_frame(env, frame_addr); 4795 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4796 goto give_sigsegv; 4797 } 4798 4799 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 4800 __put_user(info_addr, &frame->pinfo); 4801 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 4802 __put_user(uc_addr, &frame->puc); 4803 4804 if (ka->sa_flags & SA_SIGINFO) { 4805 tswap_siginfo(&frame->info, info); 4806 } 4807 4808 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/ 4809 __put_user(0, &frame->uc.tuc_flags); 4810 __put_user(0, &frame->uc.tuc_link); 4811 __put_user(target_sigaltstack_used.ss_sp, 4812 &frame->uc.tuc_stack.ss_sp); 4813 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)), 4814 &frame->uc.tuc_stack.ss_flags); 4815 __put_user(target_sigaltstack_used.ss_size, 4816 &frame->uc.tuc_stack.ss_size); 4817 setup_sigcontext(&frame->sc, env, set->sig[0]); 4818 4819 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ 4820 4821 /* trampoline - the desired return ip is the retcode itself */ 4822 return_ip = (unsigned long)&frame->retcode; 4823 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ 4824 __put_user(0xa960, (short *)(frame->retcode + 0)); 4825 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); 4826 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); 4827 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); 4828 4829 if (err) { 4830 goto give_sigsegv; 4831 } 4832 4833 /* TODO what is the current->exec_domain stuff and invmap ? */ 4834 4835 /* Set up registers for signal handler */ 4836 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ 4837 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */ 4838 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */ 4839 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */ 4840 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */ 4841 4842 /* actually move the usp to reflect the stacked frame */ 4843 cpu_set_gpr(env, 1, (unsigned long)frame); 4844 4845 return; 4846 4847 give_sigsegv: 4848 unlock_user_struct(frame, frame_addr, 1); 4849 force_sigsegv(sig); 4850 } 4851 4852 long do_sigreturn(CPUOpenRISCState *env) 4853 { 4854 trace_user_do_sigreturn(env, 0); 4855 fprintf(stderr, "do_sigreturn: not implemented\n"); 4856 return -TARGET_ENOSYS; 4857 } 4858 4859 long do_rt_sigreturn(CPUOpenRISCState *env) 4860 { 4861 trace_user_do_rt_sigreturn(env, 0); 4862 fprintf(stderr, "do_rt_sigreturn: not implemented\n"); 4863 return -TARGET_ENOSYS; 4864 } 4865 /* TARGET_OPENRISC */ 4866 4867 #elif defined(TARGET_S390X) 4868 4869 #define __NUM_GPRS 16 4870 #define __NUM_FPRS 16 4871 #define __NUM_ACRS 16 4872 4873 #define S390_SYSCALL_SIZE 2 4874 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */ 4875 4876 #define _SIGCONTEXT_NSIG 64 4877 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */ 4878 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 4879 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) 4880 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */ 4881 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00) 4882 4883 typedef struct { 4884 target_psw_t psw; 4885 target_ulong gprs[__NUM_GPRS]; 4886 unsigned int acrs[__NUM_ACRS]; 4887 } target_s390_regs_common; 4888 4889 typedef struct { 4890 unsigned int fpc; 4891 double fprs[__NUM_FPRS]; 4892 } target_s390_fp_regs; 4893 4894 typedef struct { 4895 target_s390_regs_common regs; 4896 target_s390_fp_regs fpregs; 4897 } target_sigregs; 4898 4899 struct target_sigcontext { 4900 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS]; 4901 target_sigregs *sregs; 4902 }; 4903 4904 typedef struct { 4905 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4906 struct target_sigcontext sc; 4907 target_sigregs sregs; 4908 int signo; 4909 uint8_t retcode[S390_SYSCALL_SIZE]; 4910 } sigframe; 4911 4912 struct target_ucontext { 4913 target_ulong tuc_flags; 4914 struct target_ucontext *tuc_link; 4915 target_stack_t tuc_stack; 4916 target_sigregs tuc_mcontext; 4917 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 4918 }; 4919 4920 typedef struct { 4921 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; 4922 uint8_t retcode[S390_SYSCALL_SIZE]; 4923 struct target_siginfo info; 4924 struct target_ucontext uc; 4925 } rt_sigframe; 4926 4927 static inline abi_ulong 4928 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size) 4929 { 4930 abi_ulong sp; 4931 4932 /* Default to using normal stack */ 4933 sp = env->regs[15]; 4934 4935 /* This is the X/Open sanctioned signal stack switching. */ 4936 if (ka->sa_flags & TARGET_SA_ONSTACK) { 4937 if (!sas_ss_flags(sp)) { 4938 sp = target_sigaltstack_used.ss_sp + 4939 target_sigaltstack_used.ss_size; 4940 } 4941 } 4942 4943 /* This is the legacy signal stack switching. */ 4944 else if (/* FIXME !user_mode(regs) */ 0 && 4945 !(ka->sa_flags & TARGET_SA_RESTORER) && 4946 ka->sa_restorer) { 4947 sp = (abi_ulong) ka->sa_restorer; 4948 } 4949 4950 return (sp - frame_size) & -8ul; 4951 } 4952 4953 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs) 4954 { 4955 int i; 4956 //save_access_regs(current->thread.acrs); FIXME 4957 4958 /* Copy a 'clean' PSW mask to the user to avoid leaking 4959 information about whether PER is currently on. */ 4960 __put_user(env->psw.mask, &sregs->regs.psw.mask); 4961 __put_user(env->psw.addr, &sregs->regs.psw.addr); 4962 for (i = 0; i < 16; i++) { 4963 __put_user(env->regs[i], &sregs->regs.gprs[i]); 4964 } 4965 for (i = 0; i < 16; i++) { 4966 __put_user(env->aregs[i], &sregs->regs.acrs[i]); 4967 } 4968 /* 4969 * We have to store the fp registers to current->thread.fp_regs 4970 * to merge them with the emulated registers. 4971 */ 4972 //save_fp_regs(¤t->thread.fp_regs); FIXME 4973 for (i = 0; i < 16; i++) { 4974 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]); 4975 } 4976 } 4977 4978 static void setup_frame(int sig, struct target_sigaction *ka, 4979 target_sigset_t *set, CPUS390XState *env) 4980 { 4981 sigframe *frame; 4982 abi_ulong frame_addr; 4983 4984 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 4985 trace_user_setup_frame(env, frame_addr); 4986 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 4987 goto give_sigsegv; 4988 } 4989 4990 __put_user(set->sig[0], &frame->sc.oldmask[0]); 4991 4992 save_sigregs(env, &frame->sregs); 4993 4994 __put_user((abi_ulong)(unsigned long)&frame->sregs, 4995 (abi_ulong *)&frame->sc.sregs); 4996 4997 /* Set up to return from userspace. If provided, use a stub 4998 already in userspace. */ 4999 if (ka->sa_flags & TARGET_SA_RESTORER) { 5000 env->regs[14] = (unsigned long) 5001 ka->sa_restorer | PSW_ADDR_AMODE; 5002 } else { 5003 env->regs[14] = (frame_addr + offsetof(sigframe, retcode)) 5004 | PSW_ADDR_AMODE; 5005 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, 5006 (uint16_t *)(frame->retcode)); 5007 } 5008 5009 /* Set up backchain. */ 5010 __put_user(env->regs[15], (abi_ulong *) frame); 5011 5012 /* Set up registers for signal handler */ 5013 env->regs[15] = frame_addr; 5014 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 5015 5016 env->regs[2] = sig; //map_signal(sig); 5017 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc); 5018 5019 /* We forgot to include these in the sigcontext. 5020 To avoid breaking binary compatibility, they are passed as args. */ 5021 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no; 5022 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr; 5023 5024 /* Place signal number on stack to allow backtrace from handler. */ 5025 __put_user(env->regs[2], &frame->signo); 5026 unlock_user_struct(frame, frame_addr, 1); 5027 return; 5028 5029 give_sigsegv: 5030 force_sigsegv(sig); 5031 } 5032 5033 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5034 target_siginfo_t *info, 5035 target_sigset_t *set, CPUS390XState *env) 5036 { 5037 int i; 5038 rt_sigframe *frame; 5039 abi_ulong frame_addr; 5040 5041 frame_addr = get_sigframe(ka, env, sizeof *frame); 5042 trace_user_setup_rt_frame(env, frame_addr); 5043 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5044 goto give_sigsegv; 5045 } 5046 5047 tswap_siginfo(&frame->info, info); 5048 5049 /* Create the ucontext. */ 5050 __put_user(0, &frame->uc.tuc_flags); 5051 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); 5052 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 5053 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 5054 &frame->uc.tuc_stack.ss_flags); 5055 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 5056 save_sigregs(env, &frame->uc.tuc_mcontext); 5057 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 5058 __put_user((abi_ulong)set->sig[i], 5059 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); 5060 } 5061 5062 /* Set up to return from userspace. If provided, use a stub 5063 already in userspace. */ 5064 if (ka->sa_flags & TARGET_SA_RESTORER) { 5065 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; 5066 } else { 5067 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; 5068 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, 5069 (uint16_t *)(frame->retcode)); 5070 } 5071 5072 /* Set up backchain. */ 5073 __put_user(env->regs[15], (abi_ulong *) frame); 5074 5075 /* Set up registers for signal handler */ 5076 env->regs[15] = frame_addr; 5077 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; 5078 5079 env->regs[2] = sig; //map_signal(sig); 5080 env->regs[3] = frame_addr + offsetof(typeof(*frame), info); 5081 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); 5082 return; 5083 5084 give_sigsegv: 5085 force_sigsegv(sig); 5086 } 5087 5088 static int 5089 restore_sigregs(CPUS390XState *env, target_sigregs *sc) 5090 { 5091 int err = 0; 5092 int i; 5093 5094 for (i = 0; i < 16; i++) { 5095 __get_user(env->regs[i], &sc->regs.gprs[i]); 5096 } 5097 5098 __get_user(env->psw.mask, &sc->regs.psw.mask); 5099 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr, 5100 (unsigned long long)env->psw.addr); 5101 __get_user(env->psw.addr, &sc->regs.psw.addr); 5102 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */ 5103 5104 for (i = 0; i < 16; i++) { 5105 __get_user(env->aregs[i], &sc->regs.acrs[i]); 5106 } 5107 for (i = 0; i < 16; i++) { 5108 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]); 5109 } 5110 5111 return err; 5112 } 5113 5114 long do_sigreturn(CPUS390XState *env) 5115 { 5116 sigframe *frame; 5117 abi_ulong frame_addr = env->regs[15]; 5118 target_sigset_t target_set; 5119 sigset_t set; 5120 5121 trace_user_do_sigreturn(env, frame_addr); 5122 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5123 goto badframe; 5124 } 5125 __get_user(target_set.sig[0], &frame->sc.oldmask[0]); 5126 5127 target_to_host_sigset_internal(&set, &target_set); 5128 set_sigmask(&set); /* ~_BLOCKABLE? */ 5129 5130 if (restore_sigregs(env, &frame->sregs)) { 5131 goto badframe; 5132 } 5133 5134 unlock_user_struct(frame, frame_addr, 0); 5135 return -TARGET_QEMU_ESIGRETURN; 5136 5137 badframe: 5138 force_sig(TARGET_SIGSEGV); 5139 return -TARGET_QEMU_ESIGRETURN; 5140 } 5141 5142 long do_rt_sigreturn(CPUS390XState *env) 5143 { 5144 rt_sigframe *frame; 5145 abi_ulong frame_addr = env->regs[15]; 5146 sigset_t set; 5147 5148 trace_user_do_rt_sigreturn(env, frame_addr); 5149 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 5150 goto badframe; 5151 } 5152 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 5153 5154 set_sigmask(&set); /* ~_BLOCKABLE? */ 5155 5156 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { 5157 goto badframe; 5158 } 5159 5160 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, 5161 get_sp_from_cpustate(env)) == -EFAULT) { 5162 goto badframe; 5163 } 5164 unlock_user_struct(frame, frame_addr, 0); 5165 return -TARGET_QEMU_ESIGRETURN; 5166 5167 badframe: 5168 unlock_user_struct(frame, frame_addr, 0); 5169 force_sig(TARGET_SIGSEGV); 5170 return -TARGET_QEMU_ESIGRETURN; 5171 } 5172 5173 #elif defined(TARGET_PPC) 5174 5175 /* Size of dummy stack frame allocated when calling signal handler. 5176 See arch/powerpc/include/asm/ptrace.h. */ 5177 #if defined(TARGET_PPC64) 5178 #define SIGNAL_FRAMESIZE 128 5179 #else 5180 #define SIGNAL_FRAMESIZE 64 5181 #endif 5182 5183 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC; 5184 on 64-bit PPC, sigcontext and mcontext are one and the same. */ 5185 struct target_mcontext { 5186 target_ulong mc_gregs[48]; 5187 /* Includes fpscr. */ 5188 uint64_t mc_fregs[33]; 5189 #if defined(TARGET_PPC64) 5190 /* Pointer to the vector regs */ 5191 target_ulong v_regs; 5192 #else 5193 target_ulong mc_pad[2]; 5194 #endif 5195 /* We need to handle Altivec and SPE at the same time, which no 5196 kernel needs to do. Fortunately, the kernel defines this bit to 5197 be Altivec-register-large all the time, rather than trying to 5198 twiddle it based on the specific platform. */ 5199 union { 5200 /* SPE vector registers. One extra for SPEFSCR. */ 5201 uint32_t spe[33]; 5202 /* Altivec vector registers. The packing of VSCR and VRSAVE 5203 varies depending on whether we're PPC64 or not: PPC64 splits 5204 them apart; PPC32 stuffs them together. 5205 We also need to account for the VSX registers on PPC64 5206 */ 5207 #if defined(TARGET_PPC64) 5208 #define QEMU_NVRREG (34 + 16) 5209 /* On ppc64, this mcontext structure is naturally *unaligned*, 5210 * or rather it is aligned on a 8 bytes boundary but not on 5211 * a 16 bytes one. This pad fixes it up. This is also why the 5212 * vector regs are referenced by the v_regs pointer above so 5213 * any amount of padding can be added here 5214 */ 5215 target_ulong pad; 5216 #else 5217 /* On ppc32, we are already aligned to 16 bytes */ 5218 #define QEMU_NVRREG 33 5219 #endif 5220 /* We cannot use ppc_avr_t here as we do *not* want the implied 5221 * 16-bytes alignment that would result from it. This would have 5222 * the effect of making the whole struct target_mcontext aligned 5223 * which breaks the layout of struct target_ucontext on ppc64. 5224 */ 5225 uint64_t altivec[QEMU_NVRREG][2]; 5226 #undef QEMU_NVRREG 5227 } mc_vregs; 5228 }; 5229 5230 /* See arch/powerpc/include/asm/sigcontext.h. */ 5231 struct target_sigcontext { 5232 target_ulong _unused[4]; 5233 int32_t signal; 5234 #if defined(TARGET_PPC64) 5235 int32_t pad0; 5236 #endif 5237 target_ulong handler; 5238 target_ulong oldmask; 5239 target_ulong regs; /* struct pt_regs __user * */ 5240 #if defined(TARGET_PPC64) 5241 struct target_mcontext mcontext; 5242 #endif 5243 }; 5244 5245 /* Indices for target_mcontext.mc_gregs, below. 5246 See arch/powerpc/include/asm/ptrace.h for details. */ 5247 enum { 5248 TARGET_PT_R0 = 0, 5249 TARGET_PT_R1 = 1, 5250 TARGET_PT_R2 = 2, 5251 TARGET_PT_R3 = 3, 5252 TARGET_PT_R4 = 4, 5253 TARGET_PT_R5 = 5, 5254 TARGET_PT_R6 = 6, 5255 TARGET_PT_R7 = 7, 5256 TARGET_PT_R8 = 8, 5257 TARGET_PT_R9 = 9, 5258 TARGET_PT_R10 = 10, 5259 TARGET_PT_R11 = 11, 5260 TARGET_PT_R12 = 12, 5261 TARGET_PT_R13 = 13, 5262 TARGET_PT_R14 = 14, 5263 TARGET_PT_R15 = 15, 5264 TARGET_PT_R16 = 16, 5265 TARGET_PT_R17 = 17, 5266 TARGET_PT_R18 = 18, 5267 TARGET_PT_R19 = 19, 5268 TARGET_PT_R20 = 20, 5269 TARGET_PT_R21 = 21, 5270 TARGET_PT_R22 = 22, 5271 TARGET_PT_R23 = 23, 5272 TARGET_PT_R24 = 24, 5273 TARGET_PT_R25 = 25, 5274 TARGET_PT_R26 = 26, 5275 TARGET_PT_R27 = 27, 5276 TARGET_PT_R28 = 28, 5277 TARGET_PT_R29 = 29, 5278 TARGET_PT_R30 = 30, 5279 TARGET_PT_R31 = 31, 5280 TARGET_PT_NIP = 32, 5281 TARGET_PT_MSR = 33, 5282 TARGET_PT_ORIG_R3 = 34, 5283 TARGET_PT_CTR = 35, 5284 TARGET_PT_LNK = 36, 5285 TARGET_PT_XER = 37, 5286 TARGET_PT_CCR = 38, 5287 /* Yes, there are two registers with #39. One is 64-bit only. */ 5288 TARGET_PT_MQ = 39, 5289 TARGET_PT_SOFTE = 39, 5290 TARGET_PT_TRAP = 40, 5291 TARGET_PT_DAR = 41, 5292 TARGET_PT_DSISR = 42, 5293 TARGET_PT_RESULT = 43, 5294 TARGET_PT_REGS_COUNT = 44 5295 }; 5296 5297 5298 struct target_ucontext { 5299 target_ulong tuc_flags; 5300 target_ulong tuc_link; /* ucontext_t __user * */ 5301 struct target_sigaltstack tuc_stack; 5302 #if !defined(TARGET_PPC64) 5303 int32_t tuc_pad[7]; 5304 target_ulong tuc_regs; /* struct mcontext __user * 5305 points to uc_mcontext field */ 5306 #endif 5307 target_sigset_t tuc_sigmask; 5308 #if defined(TARGET_PPC64) 5309 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */ 5310 struct target_sigcontext tuc_sigcontext; 5311 #else 5312 int32_t tuc_maskext[30]; 5313 int32_t tuc_pad2[3]; 5314 struct target_mcontext tuc_mcontext; 5315 #endif 5316 }; 5317 5318 /* See arch/powerpc/kernel/signal_32.c. */ 5319 struct target_sigframe { 5320 struct target_sigcontext sctx; 5321 struct target_mcontext mctx; 5322 int32_t abigap[56]; 5323 }; 5324 5325 #if defined(TARGET_PPC64) 5326 5327 #define TARGET_TRAMP_SIZE 6 5328 5329 struct target_rt_sigframe { 5330 /* sys_rt_sigreturn requires the ucontext be the first field */ 5331 struct target_ucontext uc; 5332 target_ulong _unused[2]; 5333 uint32_t trampoline[TARGET_TRAMP_SIZE]; 5334 target_ulong pinfo; /* struct siginfo __user * */ 5335 target_ulong puc; /* void __user * */ 5336 struct target_siginfo info; 5337 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 5338 char abigap[288]; 5339 } __attribute__((aligned(16))); 5340 5341 #else 5342 5343 struct target_rt_sigframe { 5344 struct target_siginfo info; 5345 struct target_ucontext uc; 5346 int32_t abigap[56]; 5347 }; 5348 5349 #endif 5350 5351 #if defined(TARGET_PPC64) 5352 5353 struct target_func_ptr { 5354 target_ulong entry; 5355 target_ulong toc; 5356 }; 5357 5358 #endif 5359 5360 /* We use the mc_pad field for the signal return trampoline. */ 5361 #define tramp mc_pad 5362 5363 /* See arch/powerpc/kernel/signal.c. */ 5364 static target_ulong get_sigframe(struct target_sigaction *ka, 5365 CPUPPCState *env, 5366 int frame_size) 5367 { 5368 target_ulong oldsp; 5369 5370 oldsp = env->gpr[1]; 5371 5372 if ((ka->sa_flags & TARGET_SA_ONSTACK) && 5373 (sas_ss_flags(oldsp) == 0)) { 5374 oldsp = (target_sigaltstack_used.ss_sp 5375 + target_sigaltstack_used.ss_size); 5376 } 5377 5378 return (oldsp - frame_size) & ~0xFUL; 5379 } 5380 5381 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \ 5382 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN))) 5383 #define PPC_VEC_HI 0 5384 #define PPC_VEC_LO 1 5385 #else 5386 #define PPC_VEC_HI 1 5387 #define PPC_VEC_LO 0 5388 #endif 5389 5390 5391 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) 5392 { 5393 target_ulong msr = env->msr; 5394 int i; 5395 target_ulong ccr = 0; 5396 5397 /* In general, the kernel attempts to be intelligent about what it 5398 needs to save for Altivec/FP/SPE registers. We don't care that 5399 much, so we just go ahead and save everything. */ 5400 5401 /* Save general registers. */ 5402 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5403 __put_user(env->gpr[i], &frame->mc_gregs[i]); 5404 } 5405 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 5406 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 5407 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 5408 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 5409 5410 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 5411 ccr |= env->crf[i] << (32 - ((i + 1) * 4)); 5412 } 5413 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 5414 5415 /* Save Altivec registers if necessary. */ 5416 if (env->insns_flags & PPC_ALTIVEC) { 5417 uint32_t *vrsave; 5418 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 5419 ppc_avr_t *avr = &env->avr[i]; 5420 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i]; 5421 5422 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]); 5423 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]); 5424 } 5425 /* Set MSR_VR in the saved MSR value to indicate that 5426 frame->mc_vregs contains valid data. */ 5427 msr |= MSR_VR; 5428 #if defined(TARGET_PPC64) 5429 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33]; 5430 /* 64-bit needs to put a pointer to the vectors in the frame */ 5431 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs); 5432 #else 5433 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32]; 5434 #endif 5435 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave); 5436 } 5437 5438 /* Save VSX second halves */ 5439 if (env->insns_flags2 & PPC2_VSX) { 5440 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34]; 5441 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) { 5442 __put_user(env->vsr[i], &vsregs[i]); 5443 } 5444 } 5445 5446 /* Save floating point registers. */ 5447 if (env->insns_flags & PPC_FLOAT) { 5448 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 5449 __put_user(env->fpr[i], &frame->mc_fregs[i]); 5450 } 5451 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]); 5452 } 5453 5454 /* Save SPE registers. The kernel only saves the high half. */ 5455 if (env->insns_flags & PPC_SPE) { 5456 #if defined(TARGET_PPC64) 5457 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5458 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]); 5459 } 5460 #else 5461 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 5462 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]); 5463 } 5464 #endif 5465 /* Set MSR_SPE in the saved MSR value to indicate that 5466 frame->mc_vregs contains valid data. */ 5467 msr |= MSR_SPE; 5468 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 5469 } 5470 5471 /* Store MSR. */ 5472 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 5473 } 5474 5475 static void encode_trampoline(int sigret, uint32_t *tramp) 5476 { 5477 /* Set up the sigreturn trampoline: li r0,sigret; sc. */ 5478 if (sigret) { 5479 __put_user(0x38000000 | sigret, &tramp[0]); 5480 __put_user(0x44000002, &tramp[1]); 5481 } 5482 } 5483 5484 static void restore_user_regs(CPUPPCState *env, 5485 struct target_mcontext *frame, int sig) 5486 { 5487 target_ulong save_r2 = 0; 5488 target_ulong msr; 5489 target_ulong ccr; 5490 5491 int i; 5492 5493 if (!sig) { 5494 save_r2 = env->gpr[2]; 5495 } 5496 5497 /* Restore general registers. */ 5498 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5499 __get_user(env->gpr[i], &frame->mc_gregs[i]); 5500 } 5501 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); 5502 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); 5503 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); 5504 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); 5505 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); 5506 5507 for (i = 0; i < ARRAY_SIZE(env->crf); i++) { 5508 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; 5509 } 5510 5511 if (!sig) { 5512 env->gpr[2] = save_r2; 5513 } 5514 /* Restore MSR. */ 5515 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]); 5516 5517 /* If doing signal return, restore the previous little-endian mode. */ 5518 if (sig) 5519 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE)); 5520 5521 /* Restore Altivec registers if necessary. */ 5522 if (env->insns_flags & PPC_ALTIVEC) { 5523 ppc_avr_t *v_regs; 5524 uint32_t *vrsave; 5525 #if defined(TARGET_PPC64) 5526 uint64_t v_addr; 5527 /* 64-bit needs to recover the pointer to the vectors from the frame */ 5528 __get_user(v_addr, &frame->v_regs); 5529 v_regs = g2h(v_addr); 5530 #else 5531 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec; 5532 #endif 5533 for (i = 0; i < ARRAY_SIZE(env->avr); i++) { 5534 ppc_avr_t *avr = &env->avr[i]; 5535 ppc_avr_t *vreg = &v_regs[i]; 5536 5537 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]); 5538 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]); 5539 } 5540 /* Set MSR_VEC in the saved MSR value to indicate that 5541 frame->mc_vregs contains valid data. */ 5542 #if defined(TARGET_PPC64) 5543 vrsave = (uint32_t *)&v_regs[33]; 5544 #else 5545 vrsave = (uint32_t *)&v_regs[32]; 5546 #endif 5547 __get_user(env->spr[SPR_VRSAVE], vrsave); 5548 } 5549 5550 /* Restore VSX second halves */ 5551 if (env->insns_flags2 & PPC2_VSX) { 5552 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34]; 5553 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) { 5554 __get_user(env->vsr[i], &vsregs[i]); 5555 } 5556 } 5557 5558 /* Restore floating point registers. */ 5559 if (env->insns_flags & PPC_FLOAT) { 5560 uint64_t fpscr; 5561 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { 5562 __get_user(env->fpr[i], &frame->mc_fregs[i]); 5563 } 5564 __get_user(fpscr, &frame->mc_fregs[32]); 5565 env->fpscr = (uint32_t) fpscr; 5566 } 5567 5568 /* Save SPE registers. The kernel only saves the high half. */ 5569 if (env->insns_flags & PPC_SPE) { 5570 #if defined(TARGET_PPC64) 5571 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 5572 uint32_t hi; 5573 5574 __get_user(hi, &frame->mc_vregs.spe[i]); 5575 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); 5576 } 5577 #else 5578 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { 5579 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]); 5580 } 5581 #endif 5582 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]); 5583 } 5584 } 5585 5586 #if !defined(TARGET_PPC64) 5587 static void setup_frame(int sig, struct target_sigaction *ka, 5588 target_sigset_t *set, CPUPPCState *env) 5589 { 5590 struct target_sigframe *frame; 5591 struct target_sigcontext *sc; 5592 target_ulong frame_addr, newsp; 5593 int err = 0; 5594 5595 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 5596 trace_user_setup_frame(env, frame_addr); 5597 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) 5598 goto sigsegv; 5599 sc = &frame->sctx; 5600 5601 __put_user(ka->_sa_handler, &sc->handler); 5602 __put_user(set->sig[0], &sc->oldmask); 5603 __put_user(set->sig[1], &sc->_unused[3]); 5604 __put_user(h2g(&frame->mctx), &sc->regs); 5605 __put_user(sig, &sc->signal); 5606 5607 /* Save user regs. */ 5608 save_user_regs(env, &frame->mctx); 5609 5610 /* Construct the trampoline code on the stack. */ 5611 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); 5612 5613 /* The kernel checks for the presence of a VDSO here. We don't 5614 emulate a vdso, so use a sigreturn system call. */ 5615 env->lr = (target_ulong) h2g(frame->mctx.tramp); 5616 5617 /* Turn off all fp exceptions. */ 5618 env->fpscr = 0; 5619 5620 /* Create a stack frame for the caller of the handler. */ 5621 newsp = frame_addr - SIGNAL_FRAMESIZE; 5622 err |= put_user(env->gpr[1], newsp, target_ulong); 5623 5624 if (err) 5625 goto sigsegv; 5626 5627 /* Set up registers for signal handler. */ 5628 env->gpr[1] = newsp; 5629 env->gpr[3] = sig; 5630 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx); 5631 5632 env->nip = (target_ulong) ka->_sa_handler; 5633 5634 /* Signal handlers are entered in big-endian mode. */ 5635 env->msr &= ~(1ull << MSR_LE); 5636 5637 unlock_user_struct(frame, frame_addr, 1); 5638 return; 5639 5640 sigsegv: 5641 unlock_user_struct(frame, frame_addr, 1); 5642 force_sigsegv(sig); 5643 } 5644 #endif /* !defined(TARGET_PPC64) */ 5645 5646 static void setup_rt_frame(int sig, struct target_sigaction *ka, 5647 target_siginfo_t *info, 5648 target_sigset_t *set, CPUPPCState *env) 5649 { 5650 struct target_rt_sigframe *rt_sf; 5651 uint32_t *trampptr = 0; 5652 struct target_mcontext *mctx = 0; 5653 target_ulong rt_sf_addr, newsp = 0; 5654 int i, err = 0; 5655 #if defined(TARGET_PPC64) 5656 struct target_sigcontext *sc = 0; 5657 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info; 5658 #endif 5659 5660 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); 5661 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) 5662 goto sigsegv; 5663 5664 tswap_siginfo(&rt_sf->info, info); 5665 5666 __put_user(0, &rt_sf->uc.tuc_flags); 5667 __put_user(0, &rt_sf->uc.tuc_link); 5668 __put_user((target_ulong)target_sigaltstack_used.ss_sp, 5669 &rt_sf->uc.tuc_stack.ss_sp); 5670 __put_user(sas_ss_flags(env->gpr[1]), 5671 &rt_sf->uc.tuc_stack.ss_flags); 5672 __put_user(target_sigaltstack_used.ss_size, 5673 &rt_sf->uc.tuc_stack.ss_size); 5674 #if !defined(TARGET_PPC64) 5675 __put_user(h2g (&rt_sf->uc.tuc_mcontext), 5676 &rt_sf->uc.tuc_regs); 5677 #endif 5678 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 5679 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); 5680 } 5681 5682 #if defined(TARGET_PPC64) 5683 mctx = &rt_sf->uc.tuc_sigcontext.mcontext; 5684 trampptr = &rt_sf->trampoline[0]; 5685 5686 sc = &rt_sf->uc.tuc_sigcontext; 5687 __put_user(h2g(mctx), &sc->regs); 5688 __put_user(sig, &sc->signal); 5689 #else 5690 mctx = &rt_sf->uc.tuc_mcontext; 5691 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; 5692 #endif 5693 5694 save_user_regs(env, mctx); 5695 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); 5696 5697 /* The kernel checks for the presence of a VDSO here. We don't 5698 emulate a vdso, so use a sigreturn system call. */ 5699 env->lr = (target_ulong) h2g(trampptr); 5700 5701 /* Turn off all fp exceptions. */ 5702 env->fpscr = 0; 5703 5704 /* Create a stack frame for the caller of the handler. */ 5705 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); 5706 err |= put_user(env->gpr[1], newsp, target_ulong); 5707 5708 if (err) 5709 goto sigsegv; 5710 5711 /* Set up registers for signal handler. */ 5712 env->gpr[1] = newsp; 5713 env->gpr[3] = (target_ulong) sig; 5714 env->gpr[4] = (target_ulong) h2g(&rt_sf->info); 5715 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); 5716 env->gpr[6] = (target_ulong) h2g(rt_sf); 5717 5718 #if defined(TARGET_PPC64) 5719 if (get_ppc64_abi(image) < 2) { 5720 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */ 5721 struct target_func_ptr *handler = 5722 (struct target_func_ptr *)g2h(ka->_sa_handler); 5723 env->nip = tswapl(handler->entry); 5724 env->gpr[2] = tswapl(handler->toc); 5725 } else { 5726 /* ELFv2 PPC64 function pointers are entry points, but R12 5727 * must also be set */ 5728 env->nip = tswapl((target_ulong) ka->_sa_handler); 5729 env->gpr[12] = env->nip; 5730 } 5731 #else 5732 env->nip = (target_ulong) ka->_sa_handler; 5733 #endif 5734 5735 /* Signal handlers are entered in big-endian mode. */ 5736 env->msr &= ~(1ull << MSR_LE); 5737 5738 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5739 return; 5740 5741 sigsegv: 5742 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5743 force_sigsegv(sig); 5744 5745 } 5746 5747 #if !defined(TARGET_PPC64) 5748 long do_sigreturn(CPUPPCState *env) 5749 { 5750 struct target_sigcontext *sc = NULL; 5751 struct target_mcontext *sr = NULL; 5752 target_ulong sr_addr = 0, sc_addr; 5753 sigset_t blocked; 5754 target_sigset_t set; 5755 5756 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; 5757 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) 5758 goto sigsegv; 5759 5760 #if defined(TARGET_PPC64) 5761 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); 5762 #else 5763 __get_user(set.sig[0], &sc->oldmask); 5764 __get_user(set.sig[1], &sc->_unused[3]); 5765 #endif 5766 target_to_host_sigset_internal(&blocked, &set); 5767 set_sigmask(&blocked); 5768 5769 __get_user(sr_addr, &sc->regs); 5770 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) 5771 goto sigsegv; 5772 restore_user_regs(env, sr, 1); 5773 5774 unlock_user_struct(sr, sr_addr, 1); 5775 unlock_user_struct(sc, sc_addr, 1); 5776 return -TARGET_QEMU_ESIGRETURN; 5777 5778 sigsegv: 5779 unlock_user_struct(sr, sr_addr, 1); 5780 unlock_user_struct(sc, sc_addr, 1); 5781 force_sig(TARGET_SIGSEGV); 5782 return -TARGET_QEMU_ESIGRETURN; 5783 } 5784 #endif /* !defined(TARGET_PPC64) */ 5785 5786 /* See arch/powerpc/kernel/signal_32.c. */ 5787 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig) 5788 { 5789 struct target_mcontext *mcp; 5790 target_ulong mcp_addr; 5791 sigset_t blocked; 5792 target_sigset_t set; 5793 5794 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask), 5795 sizeof (set))) 5796 return 1; 5797 5798 #if defined(TARGET_PPC64) 5799 mcp_addr = h2g(ucp) + 5800 offsetof(struct target_ucontext, tuc_sigcontext.mcontext); 5801 #else 5802 __get_user(mcp_addr, &ucp->tuc_regs); 5803 #endif 5804 5805 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) 5806 return 1; 5807 5808 target_to_host_sigset_internal(&blocked, &set); 5809 set_sigmask(&blocked); 5810 restore_user_regs(env, mcp, sig); 5811 5812 unlock_user_struct(mcp, mcp_addr, 1); 5813 return 0; 5814 } 5815 5816 long do_rt_sigreturn(CPUPPCState *env) 5817 { 5818 struct target_rt_sigframe *rt_sf = NULL; 5819 target_ulong rt_sf_addr; 5820 5821 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16; 5822 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) 5823 goto sigsegv; 5824 5825 if (do_setcontext(&rt_sf->uc, env, 1)) 5826 goto sigsegv; 5827 5828 do_sigaltstack(rt_sf_addr 5829 + offsetof(struct target_rt_sigframe, uc.tuc_stack), 5830 0, env->gpr[1]); 5831 5832 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5833 return -TARGET_QEMU_ESIGRETURN; 5834 5835 sigsegv: 5836 unlock_user_struct(rt_sf, rt_sf_addr, 1); 5837 force_sig(TARGET_SIGSEGV); 5838 return -TARGET_QEMU_ESIGRETURN; 5839 } 5840 5841 #elif defined(TARGET_M68K) 5842 5843 struct target_sigcontext { 5844 abi_ulong sc_mask; 5845 abi_ulong sc_usp; 5846 abi_ulong sc_d0; 5847 abi_ulong sc_d1; 5848 abi_ulong sc_a0; 5849 abi_ulong sc_a1; 5850 unsigned short sc_sr; 5851 abi_ulong sc_pc; 5852 }; 5853 5854 struct target_sigframe 5855 { 5856 abi_ulong pretcode; 5857 int sig; 5858 int code; 5859 abi_ulong psc; 5860 char retcode[8]; 5861 abi_ulong extramask[TARGET_NSIG_WORDS-1]; 5862 struct target_sigcontext sc; 5863 }; 5864 5865 typedef int target_greg_t; 5866 #define TARGET_NGREG 18 5867 typedef target_greg_t target_gregset_t[TARGET_NGREG]; 5868 5869 typedef struct target_fpregset { 5870 int f_fpcntl[3]; 5871 int f_fpregs[8*3]; 5872 } target_fpregset_t; 5873 5874 struct target_mcontext { 5875 int version; 5876 target_gregset_t gregs; 5877 target_fpregset_t fpregs; 5878 }; 5879 5880 #define TARGET_MCONTEXT_VERSION 2 5881 5882 struct target_ucontext { 5883 abi_ulong tuc_flags; 5884 abi_ulong tuc_link; 5885 target_stack_t tuc_stack; 5886 struct target_mcontext tuc_mcontext; 5887 abi_long tuc_filler[80]; 5888 target_sigset_t tuc_sigmask; 5889 }; 5890 5891 struct target_rt_sigframe 5892 { 5893 abi_ulong pretcode; 5894 int sig; 5895 abi_ulong pinfo; 5896 abi_ulong puc; 5897 char retcode[8]; 5898 struct target_siginfo info; 5899 struct target_ucontext uc; 5900 }; 5901 5902 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env, 5903 abi_ulong mask) 5904 { 5905 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env); 5906 __put_user(mask, &sc->sc_mask); 5907 __put_user(env->aregs[7], &sc->sc_usp); 5908 __put_user(env->dregs[0], &sc->sc_d0); 5909 __put_user(env->dregs[1], &sc->sc_d1); 5910 __put_user(env->aregs[0], &sc->sc_a0); 5911 __put_user(env->aregs[1], &sc->sc_a1); 5912 __put_user(sr, &sc->sc_sr); 5913 __put_user(env->pc, &sc->sc_pc); 5914 } 5915 5916 static void 5917 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc) 5918 { 5919 int temp; 5920 5921 __get_user(env->aregs[7], &sc->sc_usp); 5922 __get_user(env->dregs[0], &sc->sc_d0); 5923 __get_user(env->dregs[1], &sc->sc_d1); 5924 __get_user(env->aregs[0], &sc->sc_a0); 5925 __get_user(env->aregs[1], &sc->sc_a1); 5926 __get_user(env->pc, &sc->sc_pc); 5927 __get_user(temp, &sc->sc_sr); 5928 cpu_m68k_set_ccr(env, temp); 5929 } 5930 5931 /* 5932 * Determine which stack to use.. 5933 */ 5934 static inline abi_ulong 5935 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs, 5936 size_t frame_size) 5937 { 5938 unsigned long sp; 5939 5940 sp = regs->aregs[7]; 5941 5942 /* This is the X/Open sanctioned signal stack switching. */ 5943 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) { 5944 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 5945 } 5946 5947 return ((sp - frame_size) & -8UL); 5948 } 5949 5950 static void setup_frame(int sig, struct target_sigaction *ka, 5951 target_sigset_t *set, CPUM68KState *env) 5952 { 5953 struct target_sigframe *frame; 5954 abi_ulong frame_addr; 5955 abi_ulong retcode_addr; 5956 abi_ulong sc_addr; 5957 int i; 5958 5959 frame_addr = get_sigframe(ka, env, sizeof *frame); 5960 trace_user_setup_frame(env, frame_addr); 5961 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 5962 goto give_sigsegv; 5963 } 5964 5965 __put_user(sig, &frame->sig); 5966 5967 sc_addr = frame_addr + offsetof(struct target_sigframe, sc); 5968 __put_user(sc_addr, &frame->psc); 5969 5970 setup_sigcontext(&frame->sc, env, set->sig[0]); 5971 5972 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 5973 __put_user(set->sig[i], &frame->extramask[i - 1]); 5974 } 5975 5976 /* Set up to return from userspace. */ 5977 5978 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 5979 __put_user(retcode_addr, &frame->pretcode); 5980 5981 /* moveq #,d0; trap #0 */ 5982 5983 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), 5984 (uint32_t *)(frame->retcode)); 5985 5986 /* Set up to return from userspace */ 5987 5988 env->aregs[7] = frame_addr; 5989 env->pc = ka->_sa_handler; 5990 5991 unlock_user_struct(frame, frame_addr, 1); 5992 return; 5993 5994 give_sigsegv: 5995 force_sigsegv(sig); 5996 } 5997 5998 static inline void target_rt_save_fpu_state(struct target_ucontext *uc, 5999 CPUM68KState *env) 6000 { 6001 int i; 6002 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs; 6003 6004 __put_user(env->fpcr, &fpregs->f_fpcntl[0]); 6005 __put_user(env->fpsr, &fpregs->f_fpcntl[1]); 6006 /* fpiar is not emulated */ 6007 6008 for (i = 0; i < 8; i++) { 6009 uint32_t high = env->fregs[i].d.high << 16; 6010 __put_user(high, &fpregs->f_fpregs[i * 3]); 6011 __put_user(env->fregs[i].d.low, 6012 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]); 6013 } 6014 } 6015 6016 static inline int target_rt_setup_ucontext(struct target_ucontext *uc, 6017 CPUM68KState *env) 6018 { 6019 target_greg_t *gregs = uc->tuc_mcontext.gregs; 6020 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env); 6021 6022 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version); 6023 __put_user(env->dregs[0], &gregs[0]); 6024 __put_user(env->dregs[1], &gregs[1]); 6025 __put_user(env->dregs[2], &gregs[2]); 6026 __put_user(env->dregs[3], &gregs[3]); 6027 __put_user(env->dregs[4], &gregs[4]); 6028 __put_user(env->dregs[5], &gregs[5]); 6029 __put_user(env->dregs[6], &gregs[6]); 6030 __put_user(env->dregs[7], &gregs[7]); 6031 __put_user(env->aregs[0], &gregs[8]); 6032 __put_user(env->aregs[1], &gregs[9]); 6033 __put_user(env->aregs[2], &gregs[10]); 6034 __put_user(env->aregs[3], &gregs[11]); 6035 __put_user(env->aregs[4], &gregs[12]); 6036 __put_user(env->aregs[5], &gregs[13]); 6037 __put_user(env->aregs[6], &gregs[14]); 6038 __put_user(env->aregs[7], &gregs[15]); 6039 __put_user(env->pc, &gregs[16]); 6040 __put_user(sr, &gregs[17]); 6041 6042 target_rt_save_fpu_state(uc, env); 6043 6044 return 0; 6045 } 6046 6047 static inline void target_rt_restore_fpu_state(CPUM68KState *env, 6048 struct target_ucontext *uc) 6049 { 6050 int i; 6051 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs; 6052 uint32_t fpcr; 6053 6054 __get_user(fpcr, &fpregs->f_fpcntl[0]); 6055 cpu_m68k_set_fpcr(env, fpcr); 6056 __get_user(env->fpsr, &fpregs->f_fpcntl[1]); 6057 /* fpiar is not emulated */ 6058 6059 for (i = 0; i < 8; i++) { 6060 uint32_t high; 6061 __get_user(high, &fpregs->f_fpregs[i * 3]); 6062 env->fregs[i].d.high = high >> 16; 6063 __get_user(env->fregs[i].d.low, 6064 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]); 6065 } 6066 } 6067 6068 static inline int target_rt_restore_ucontext(CPUM68KState *env, 6069 struct target_ucontext *uc) 6070 { 6071 int temp; 6072 target_greg_t *gregs = uc->tuc_mcontext.gregs; 6073 6074 __get_user(temp, &uc->tuc_mcontext.version); 6075 if (temp != TARGET_MCONTEXT_VERSION) 6076 goto badframe; 6077 6078 /* restore passed registers */ 6079 __get_user(env->dregs[0], &gregs[0]); 6080 __get_user(env->dregs[1], &gregs[1]); 6081 __get_user(env->dregs[2], &gregs[2]); 6082 __get_user(env->dregs[3], &gregs[3]); 6083 __get_user(env->dregs[4], &gregs[4]); 6084 __get_user(env->dregs[5], &gregs[5]); 6085 __get_user(env->dregs[6], &gregs[6]); 6086 __get_user(env->dregs[7], &gregs[7]); 6087 __get_user(env->aregs[0], &gregs[8]); 6088 __get_user(env->aregs[1], &gregs[9]); 6089 __get_user(env->aregs[2], &gregs[10]); 6090 __get_user(env->aregs[3], &gregs[11]); 6091 __get_user(env->aregs[4], &gregs[12]); 6092 __get_user(env->aregs[5], &gregs[13]); 6093 __get_user(env->aregs[6], &gregs[14]); 6094 __get_user(env->aregs[7], &gregs[15]); 6095 __get_user(env->pc, &gregs[16]); 6096 __get_user(temp, &gregs[17]); 6097 cpu_m68k_set_ccr(env, temp); 6098 6099 target_rt_restore_fpu_state(env, uc); 6100 6101 return 0; 6102 6103 badframe: 6104 return 1; 6105 } 6106 6107 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6108 target_siginfo_t *info, 6109 target_sigset_t *set, CPUM68KState *env) 6110 { 6111 struct target_rt_sigframe *frame; 6112 abi_ulong frame_addr; 6113 abi_ulong retcode_addr; 6114 abi_ulong info_addr; 6115 abi_ulong uc_addr; 6116 int err = 0; 6117 int i; 6118 6119 frame_addr = get_sigframe(ka, env, sizeof *frame); 6120 trace_user_setup_rt_frame(env, frame_addr); 6121 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6122 goto give_sigsegv; 6123 } 6124 6125 __put_user(sig, &frame->sig); 6126 6127 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); 6128 __put_user(info_addr, &frame->pinfo); 6129 6130 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); 6131 __put_user(uc_addr, &frame->puc); 6132 6133 tswap_siginfo(&frame->info, info); 6134 6135 /* Create the ucontext */ 6136 6137 __put_user(0, &frame->uc.tuc_flags); 6138 __put_user(0, &frame->uc.tuc_link); 6139 __put_user(target_sigaltstack_used.ss_sp, 6140 &frame->uc.tuc_stack.ss_sp); 6141 __put_user(sas_ss_flags(env->aregs[7]), 6142 &frame->uc.tuc_stack.ss_flags); 6143 __put_user(target_sigaltstack_used.ss_size, 6144 &frame->uc.tuc_stack.ss_size); 6145 err |= target_rt_setup_ucontext(&frame->uc, env); 6146 6147 if (err) 6148 goto give_sigsegv; 6149 6150 for(i = 0; i < TARGET_NSIG_WORDS; i++) { 6151 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 6152 } 6153 6154 /* Set up to return from userspace. */ 6155 6156 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); 6157 __put_user(retcode_addr, &frame->pretcode); 6158 6159 /* moveq #,d0; notb d0; trap #0 */ 6160 6161 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), 6162 (uint32_t *)(frame->retcode + 0)); 6163 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); 6164 6165 if (err) 6166 goto give_sigsegv; 6167 6168 /* Set up to return from userspace */ 6169 6170 env->aregs[7] = frame_addr; 6171 env->pc = ka->_sa_handler; 6172 6173 unlock_user_struct(frame, frame_addr, 1); 6174 return; 6175 6176 give_sigsegv: 6177 unlock_user_struct(frame, frame_addr, 1); 6178 force_sigsegv(sig); 6179 } 6180 6181 long do_sigreturn(CPUM68KState *env) 6182 { 6183 struct target_sigframe *frame; 6184 abi_ulong frame_addr = env->aregs[7] - 4; 6185 target_sigset_t target_set; 6186 sigset_t set; 6187 int i; 6188 6189 trace_user_do_sigreturn(env, frame_addr); 6190 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 6191 goto badframe; 6192 6193 /* set blocked signals */ 6194 6195 __get_user(target_set.sig[0], &frame->sc.sc_mask); 6196 6197 for(i = 1; i < TARGET_NSIG_WORDS; i++) { 6198 __get_user(target_set.sig[i], &frame->extramask[i - 1]); 6199 } 6200 6201 target_to_host_sigset_internal(&set, &target_set); 6202 set_sigmask(&set); 6203 6204 /* restore registers */ 6205 6206 restore_sigcontext(env, &frame->sc); 6207 6208 unlock_user_struct(frame, frame_addr, 0); 6209 return -TARGET_QEMU_ESIGRETURN; 6210 6211 badframe: 6212 force_sig(TARGET_SIGSEGV); 6213 return -TARGET_QEMU_ESIGRETURN; 6214 } 6215 6216 long do_rt_sigreturn(CPUM68KState *env) 6217 { 6218 struct target_rt_sigframe *frame; 6219 abi_ulong frame_addr = env->aregs[7] - 4; 6220 sigset_t set; 6221 6222 trace_user_do_rt_sigreturn(env, frame_addr); 6223 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) 6224 goto badframe; 6225 6226 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 6227 set_sigmask(&set); 6228 6229 /* restore registers */ 6230 6231 if (target_rt_restore_ucontext(env, &frame->uc)) 6232 goto badframe; 6233 6234 if (do_sigaltstack(frame_addr + 6235 offsetof(struct target_rt_sigframe, uc.tuc_stack), 6236 0, get_sp_from_cpustate(env)) == -EFAULT) 6237 goto badframe; 6238 6239 unlock_user_struct(frame, frame_addr, 0); 6240 return -TARGET_QEMU_ESIGRETURN; 6241 6242 badframe: 6243 unlock_user_struct(frame, frame_addr, 0); 6244 force_sig(TARGET_SIGSEGV); 6245 return -TARGET_QEMU_ESIGRETURN; 6246 } 6247 6248 #elif defined(TARGET_ALPHA) 6249 6250 struct target_sigcontext { 6251 abi_long sc_onstack; 6252 abi_long sc_mask; 6253 abi_long sc_pc; 6254 abi_long sc_ps; 6255 abi_long sc_regs[32]; 6256 abi_long sc_ownedfp; 6257 abi_long sc_fpregs[32]; 6258 abi_ulong sc_fpcr; 6259 abi_ulong sc_fp_control; 6260 abi_ulong sc_reserved1; 6261 abi_ulong sc_reserved2; 6262 abi_ulong sc_ssize; 6263 abi_ulong sc_sbase; 6264 abi_ulong sc_traparg_a0; 6265 abi_ulong sc_traparg_a1; 6266 abi_ulong sc_traparg_a2; 6267 abi_ulong sc_fp_trap_pc; 6268 abi_ulong sc_fp_trigger_sum; 6269 abi_ulong sc_fp_trigger_inst; 6270 }; 6271 6272 struct target_ucontext { 6273 abi_ulong tuc_flags; 6274 abi_ulong tuc_link; 6275 abi_ulong tuc_osf_sigmask; 6276 target_stack_t tuc_stack; 6277 struct target_sigcontext tuc_mcontext; 6278 target_sigset_t tuc_sigmask; 6279 }; 6280 6281 struct target_sigframe { 6282 struct target_sigcontext sc; 6283 unsigned int retcode[3]; 6284 }; 6285 6286 struct target_rt_sigframe { 6287 target_siginfo_t info; 6288 struct target_ucontext uc; 6289 unsigned int retcode[3]; 6290 }; 6291 6292 #define INSN_MOV_R30_R16 0x47fe0410 6293 #define INSN_LDI_R0 0x201f0000 6294 #define INSN_CALLSYS 0x00000083 6295 6296 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env, 6297 abi_ulong frame_addr, target_sigset_t *set) 6298 { 6299 int i; 6300 6301 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); 6302 __put_user(set->sig[0], &sc->sc_mask); 6303 __put_user(env->pc, &sc->sc_pc); 6304 __put_user(8, &sc->sc_ps); 6305 6306 for (i = 0; i < 31; ++i) { 6307 __put_user(env->ir[i], &sc->sc_regs[i]); 6308 } 6309 __put_user(0, &sc->sc_regs[31]); 6310 6311 for (i = 0; i < 31; ++i) { 6312 __put_user(env->fir[i], &sc->sc_fpregs[i]); 6313 } 6314 __put_user(0, &sc->sc_fpregs[31]); 6315 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr); 6316 6317 __put_user(0, &sc->sc_traparg_a0); /* FIXME */ 6318 __put_user(0, &sc->sc_traparg_a1); /* FIXME */ 6319 __put_user(0, &sc->sc_traparg_a2); /* FIXME */ 6320 } 6321 6322 static void restore_sigcontext(CPUAlphaState *env, 6323 struct target_sigcontext *sc) 6324 { 6325 uint64_t fpcr; 6326 int i; 6327 6328 __get_user(env->pc, &sc->sc_pc); 6329 6330 for (i = 0; i < 31; ++i) { 6331 __get_user(env->ir[i], &sc->sc_regs[i]); 6332 } 6333 for (i = 0; i < 31; ++i) { 6334 __get_user(env->fir[i], &sc->sc_fpregs[i]); 6335 } 6336 6337 __get_user(fpcr, &sc->sc_fpcr); 6338 cpu_alpha_store_fpcr(env, fpcr); 6339 } 6340 6341 static inline abi_ulong get_sigframe(struct target_sigaction *sa, 6342 CPUAlphaState *env, 6343 unsigned long framesize) 6344 { 6345 abi_ulong sp = env->ir[IR_SP]; 6346 6347 /* This is the X/Open sanctioned signal stack switching. */ 6348 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 6349 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 6350 } 6351 return (sp - framesize) & -32; 6352 } 6353 6354 static void setup_frame(int sig, struct target_sigaction *ka, 6355 target_sigset_t *set, CPUAlphaState *env) 6356 { 6357 abi_ulong frame_addr, r26; 6358 struct target_sigframe *frame; 6359 int err = 0; 6360 6361 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 6362 trace_user_setup_frame(env, frame_addr); 6363 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6364 goto give_sigsegv; 6365 } 6366 6367 setup_sigcontext(&frame->sc, env, frame_addr, set); 6368 6369 if (ka->sa_restorer) { 6370 r26 = ka->sa_restorer; 6371 } else { 6372 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 6373 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, 6374 &frame->retcode[1]); 6375 __put_user(INSN_CALLSYS, &frame->retcode[2]); 6376 /* imb() */ 6377 r26 = frame_addr + offsetof(struct target_sigframe, retcode); 6378 } 6379 6380 unlock_user_struct(frame, frame_addr, 1); 6381 6382 if (err) { 6383 give_sigsegv: 6384 force_sigsegv(sig); 6385 return; 6386 } 6387 6388 env->ir[IR_RA] = r26; 6389 env->ir[IR_PV] = env->pc = ka->_sa_handler; 6390 env->ir[IR_A0] = sig; 6391 env->ir[IR_A1] = 0; 6392 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc); 6393 env->ir[IR_SP] = frame_addr; 6394 } 6395 6396 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6397 target_siginfo_t *info, 6398 target_sigset_t *set, CPUAlphaState *env) 6399 { 6400 abi_ulong frame_addr, r26; 6401 struct target_rt_sigframe *frame; 6402 int i, err = 0; 6403 6404 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 6405 trace_user_setup_rt_frame(env, frame_addr); 6406 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6407 goto give_sigsegv; 6408 } 6409 6410 tswap_siginfo(&frame->info, info); 6411 6412 __put_user(0, &frame->uc.tuc_flags); 6413 __put_user(0, &frame->uc.tuc_link); 6414 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); 6415 __put_user(target_sigaltstack_used.ss_sp, 6416 &frame->uc.tuc_stack.ss_sp); 6417 __put_user(sas_ss_flags(env->ir[IR_SP]), 6418 &frame->uc.tuc_stack.ss_flags); 6419 __put_user(target_sigaltstack_used.ss_size, 6420 &frame->uc.tuc_stack.ss_size); 6421 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); 6422 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 6423 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 6424 } 6425 6426 if (ka->sa_restorer) { 6427 r26 = ka->sa_restorer; 6428 } else { 6429 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); 6430 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, 6431 &frame->retcode[1]); 6432 __put_user(INSN_CALLSYS, &frame->retcode[2]); 6433 /* imb(); */ 6434 r26 = frame_addr + offsetof(struct target_sigframe, retcode); 6435 } 6436 6437 if (err) { 6438 give_sigsegv: 6439 force_sigsegv(sig); 6440 return; 6441 } 6442 6443 env->ir[IR_RA] = r26; 6444 env->ir[IR_PV] = env->pc = ka->_sa_handler; 6445 env->ir[IR_A0] = sig; 6446 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); 6447 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 6448 env->ir[IR_SP] = frame_addr; 6449 } 6450 6451 long do_sigreturn(CPUAlphaState *env) 6452 { 6453 struct target_sigcontext *sc; 6454 abi_ulong sc_addr = env->ir[IR_A0]; 6455 target_sigset_t target_set; 6456 sigset_t set; 6457 6458 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { 6459 goto badframe; 6460 } 6461 6462 target_sigemptyset(&target_set); 6463 __get_user(target_set.sig[0], &sc->sc_mask); 6464 6465 target_to_host_sigset_internal(&set, &target_set); 6466 set_sigmask(&set); 6467 6468 restore_sigcontext(env, sc); 6469 unlock_user_struct(sc, sc_addr, 0); 6470 return -TARGET_QEMU_ESIGRETURN; 6471 6472 badframe: 6473 force_sig(TARGET_SIGSEGV); 6474 return -TARGET_QEMU_ESIGRETURN; 6475 } 6476 6477 long do_rt_sigreturn(CPUAlphaState *env) 6478 { 6479 abi_ulong frame_addr = env->ir[IR_A0]; 6480 struct target_rt_sigframe *frame; 6481 sigset_t set; 6482 6483 trace_user_do_rt_sigreturn(env, frame_addr); 6484 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 6485 goto badframe; 6486 } 6487 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 6488 set_sigmask(&set); 6489 6490 restore_sigcontext(env, &frame->uc.tuc_mcontext); 6491 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 6492 uc.tuc_stack), 6493 0, env->ir[IR_SP]) == -EFAULT) { 6494 goto badframe; 6495 } 6496 6497 unlock_user_struct(frame, frame_addr, 0); 6498 return -TARGET_QEMU_ESIGRETURN; 6499 6500 6501 badframe: 6502 unlock_user_struct(frame, frame_addr, 0); 6503 force_sig(TARGET_SIGSEGV); 6504 return -TARGET_QEMU_ESIGRETURN; 6505 } 6506 6507 #elif defined(TARGET_TILEGX) 6508 6509 struct target_sigcontext { 6510 union { 6511 /* General-purpose registers. */ 6512 abi_ulong gregs[56]; 6513 struct { 6514 abi_ulong __gregs[53]; 6515 abi_ulong tp; /* Aliases gregs[TREG_TP]. */ 6516 abi_ulong sp; /* Aliases gregs[TREG_SP]. */ 6517 abi_ulong lr; /* Aliases gregs[TREG_LR]. */ 6518 }; 6519 }; 6520 abi_ulong pc; /* Program counter. */ 6521 abi_ulong ics; /* In Interrupt Critical Section? */ 6522 abi_ulong faultnum; /* Fault number. */ 6523 abi_ulong pad[5]; 6524 }; 6525 6526 struct target_ucontext { 6527 abi_ulong tuc_flags; 6528 abi_ulong tuc_link; 6529 target_stack_t tuc_stack; 6530 struct target_sigcontext tuc_mcontext; 6531 target_sigset_t tuc_sigmask; /* mask last for extensibility */ 6532 }; 6533 6534 struct target_rt_sigframe { 6535 unsigned char save_area[16]; /* caller save area */ 6536 struct target_siginfo info; 6537 struct target_ucontext uc; 6538 abi_ulong retcode[2]; 6539 }; 6540 6541 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */ 6542 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */ 6543 6544 6545 static void setup_sigcontext(struct target_sigcontext *sc, 6546 CPUArchState *env, int signo) 6547 { 6548 int i; 6549 6550 for (i = 0; i < TILEGX_R_COUNT; ++i) { 6551 __put_user(env->regs[i], &sc->gregs[i]); 6552 } 6553 6554 __put_user(env->pc, &sc->pc); 6555 __put_user(0, &sc->ics); 6556 __put_user(signo, &sc->faultnum); 6557 } 6558 6559 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc) 6560 { 6561 int i; 6562 6563 for (i = 0; i < TILEGX_R_COUNT; ++i) { 6564 __get_user(env->regs[i], &sc->gregs[i]); 6565 } 6566 6567 __get_user(env->pc, &sc->pc); 6568 } 6569 6570 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env, 6571 size_t frame_size) 6572 { 6573 unsigned long sp = env->regs[TILEGX_R_SP]; 6574 6575 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) { 6576 return -1UL; 6577 } 6578 6579 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) { 6580 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 6581 } 6582 6583 sp -= frame_size; 6584 sp &= -16UL; 6585 return sp; 6586 } 6587 6588 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6589 target_siginfo_t *info, 6590 target_sigset_t *set, CPUArchState *env) 6591 { 6592 abi_ulong frame_addr; 6593 struct target_rt_sigframe *frame; 6594 unsigned long restorer; 6595 6596 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 6597 trace_user_setup_rt_frame(env, frame_addr); 6598 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6599 goto give_sigsegv; 6600 } 6601 6602 /* Always write at least the signal number for the stack backtracer. */ 6603 if (ka->sa_flags & TARGET_SA_SIGINFO) { 6604 /* At sigreturn time, restore the callee-save registers too. */ 6605 tswap_siginfo(&frame->info, info); 6606 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */ 6607 } else { 6608 __put_user(info->si_signo, &frame->info.si_signo); 6609 } 6610 6611 /* Create the ucontext. */ 6612 __put_user(0, &frame->uc.tuc_flags); 6613 __put_user(0, &frame->uc.tuc_link); 6614 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 6615 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]), 6616 &frame->uc.tuc_stack.ss_flags); 6617 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); 6618 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo); 6619 6620 if (ka->sa_flags & TARGET_SA_RESTORER) { 6621 restorer = (unsigned long) ka->sa_restorer; 6622 } else { 6623 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]); 6624 __put_user(INSN_SWINT1, &frame->retcode[1]); 6625 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode); 6626 } 6627 env->pc = (unsigned long) ka->_sa_handler; 6628 env->regs[TILEGX_R_SP] = (unsigned long) frame; 6629 env->regs[TILEGX_R_LR] = restorer; 6630 env->regs[0] = (unsigned long) sig; 6631 env->regs[1] = (unsigned long) &frame->info; 6632 env->regs[2] = (unsigned long) &frame->uc; 6633 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */ 6634 6635 unlock_user_struct(frame, frame_addr, 1); 6636 return; 6637 6638 give_sigsegv: 6639 force_sigsegv(sig); 6640 } 6641 6642 long do_rt_sigreturn(CPUTLGState *env) 6643 { 6644 abi_ulong frame_addr = env->regs[TILEGX_R_SP]; 6645 struct target_rt_sigframe *frame; 6646 sigset_t set; 6647 6648 trace_user_do_rt_sigreturn(env, frame_addr); 6649 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 6650 goto badframe; 6651 } 6652 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 6653 set_sigmask(&set); 6654 6655 restore_sigcontext(env, &frame->uc.tuc_mcontext); 6656 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 6657 uc.tuc_stack), 6658 0, env->regs[TILEGX_R_SP]) == -EFAULT) { 6659 goto badframe; 6660 } 6661 6662 unlock_user_struct(frame, frame_addr, 0); 6663 return -TARGET_QEMU_ESIGRETURN; 6664 6665 6666 badframe: 6667 unlock_user_struct(frame, frame_addr, 0); 6668 force_sig(TARGET_SIGSEGV); 6669 return -TARGET_QEMU_ESIGRETURN; 6670 } 6671 6672 #elif defined(TARGET_RISCV) 6673 6674 /* Signal handler invocation must be transparent for the code being 6675 interrupted. Complete CPU (hart) state is saved on entry and restored 6676 before returning from the handler. Process sigmask is also saved to block 6677 signals while the handler is running. The handler gets its own stack, 6678 which also doubles as storage for the CPU state and sigmask. 6679 6680 The code below is qemu re-implementation of arch/riscv/kernel/signal.c */ 6681 6682 struct target_sigcontext { 6683 abi_long pc; 6684 abi_long gpr[31]; /* x0 is not present, so all offsets must be -1 */ 6685 uint64_t fpr[32]; 6686 uint32_t fcsr; 6687 }; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */ 6688 6689 struct target_ucontext { 6690 unsigned long uc_flags; 6691 struct target_ucontext *uc_link; 6692 target_stack_t uc_stack; 6693 struct target_sigcontext uc_mcontext; 6694 target_sigset_t uc_sigmask; 6695 }; 6696 6697 struct target_rt_sigframe { 6698 uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */ 6699 struct target_siginfo info; 6700 struct target_ucontext uc; 6701 }; 6702 6703 static abi_ulong get_sigframe(struct target_sigaction *ka, 6704 CPURISCVState *regs, size_t framesize) 6705 { 6706 abi_ulong sp = regs->gpr[xSP]; 6707 int onsigstack = on_sig_stack(sp); 6708 6709 /* redzone */ 6710 /* This is the X/Open sanctioned signal stack switching. */ 6711 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) { 6712 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 6713 } 6714 6715 sp -= framesize; 6716 sp &= ~3UL; /* align sp on 4-byte boundary */ 6717 6718 /* If we are on the alternate signal stack and would overflow it, don't. 6719 Return an always-bogus address instead so we will die with SIGSEGV. */ 6720 if (onsigstack && !likely(on_sig_stack(sp))) { 6721 return -1L; 6722 } 6723 6724 return sp; 6725 } 6726 6727 static void setup_sigcontext(struct target_sigcontext *sc, CPURISCVState *env) 6728 { 6729 int i; 6730 6731 __put_user(env->pc, &sc->pc); 6732 6733 for (i = 1; i < 32; i++) { 6734 __put_user(env->gpr[i], &sc->gpr[i - 1]); 6735 } 6736 for (i = 0; i < 32; i++) { 6737 __put_user(env->fpr[i], &sc->fpr[i]); 6738 } 6739 6740 uint32_t fcsr = csr_read_helper(env, CSR_FCSR); /*riscv_get_fcsr(env);*/ 6741 __put_user(fcsr, &sc->fcsr); 6742 } 6743 6744 static void setup_ucontext(struct target_ucontext *uc, 6745 CPURISCVState *env, target_sigset_t *set) 6746 { 6747 abi_ulong ss_sp = (target_ulong)target_sigaltstack_used.ss_sp; 6748 abi_ulong ss_flags = sas_ss_flags(env->gpr[xSP]); 6749 abi_ulong ss_size = target_sigaltstack_used.ss_size; 6750 6751 __put_user(0, &(uc->uc_flags)); 6752 __put_user(0, &(uc->uc_link)); 6753 6754 __put_user(ss_sp, &(uc->uc_stack.ss_sp)); 6755 __put_user(ss_flags, &(uc->uc_stack.ss_flags)); 6756 __put_user(ss_size, &(uc->uc_stack.ss_size)); 6757 6758 int i; 6759 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 6760 __put_user(set->sig[i], &(uc->uc_sigmask.sig[i])); 6761 } 6762 6763 setup_sigcontext(&uc->uc_mcontext, env); 6764 } 6765 6766 static inline void install_sigtramp(uint32_t *tramp) 6767 { 6768 __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */ 6769 __put_user(0x00000073, tramp + 1); /* ecall */ 6770 } 6771 6772 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6773 target_siginfo_t *info, 6774 target_sigset_t *set, CPURISCVState *env) 6775 { 6776 abi_ulong frame_addr; 6777 struct target_rt_sigframe *frame; 6778 6779 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 6780 trace_user_setup_rt_frame(env, frame_addr); 6781 6782 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6783 goto badframe; 6784 } 6785 6786 setup_ucontext(&frame->uc, env, set); 6787 tswap_siginfo(&frame->info, info); 6788 install_sigtramp(frame->tramp); 6789 6790 env->pc = ka->_sa_handler; 6791 env->gpr[xSP] = frame_addr; 6792 env->gpr[xA0] = sig; 6793 env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info); 6794 env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc); 6795 env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp); 6796 6797 return; 6798 6799 badframe: 6800 unlock_user_struct(frame, frame_addr, 1); 6801 if (sig == TARGET_SIGSEGV) { 6802 ka->_sa_handler = TARGET_SIG_DFL; 6803 } 6804 force_sig(TARGET_SIGSEGV); 6805 } 6806 6807 static void restore_sigcontext(CPURISCVState *env, struct target_sigcontext *sc) 6808 { 6809 int i; 6810 6811 __get_user(env->pc, &sc->pc); 6812 6813 for (i = 1; i < 32; ++i) { 6814 __get_user(env->gpr[i], &sc->gpr[i - 1]); 6815 } 6816 for (i = 0; i < 32; ++i) { 6817 __get_user(env->fpr[i], &sc->fpr[i]); 6818 } 6819 6820 uint32_t fcsr; 6821 __get_user(fcsr, &sc->fcsr); 6822 csr_write_helper(env, fcsr, CSR_FCSR); 6823 } 6824 6825 static void restore_ucontext(CPURISCVState *env, struct target_ucontext *uc) 6826 { 6827 sigset_t blocked; 6828 target_sigset_t target_set; 6829 int i; 6830 6831 target_sigemptyset(&target_set); 6832 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 6833 __get_user(target_set.sig[i], &(uc->uc_sigmask.sig[i])); 6834 } 6835 6836 target_to_host_sigset_internal(&blocked, &target_set); 6837 set_sigmask(&blocked); 6838 6839 restore_sigcontext(env, &uc->uc_mcontext); 6840 } 6841 6842 long do_rt_sigreturn(CPURISCVState *env) 6843 { 6844 struct target_rt_sigframe *frame; 6845 abi_ulong frame_addr; 6846 6847 frame_addr = env->gpr[xSP]; 6848 trace_user_do_sigreturn(env, frame_addr); 6849 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 6850 goto badframe; 6851 } 6852 6853 restore_ucontext(env, &frame->uc); 6854 6855 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 6856 uc.uc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) { 6857 goto badframe; 6858 } 6859 6860 unlock_user_struct(frame, frame_addr, 0); 6861 return -TARGET_QEMU_ESIGRETURN; 6862 6863 badframe: 6864 unlock_user_struct(frame, frame_addr, 0); 6865 force_sig(TARGET_SIGSEGV); 6866 return 0; 6867 } 6868 6869 #elif defined(TARGET_HPPA) 6870 6871 struct target_sigcontext { 6872 abi_ulong sc_flags; 6873 abi_ulong sc_gr[32]; 6874 uint64_t sc_fr[32]; 6875 abi_ulong sc_iasq[2]; 6876 abi_ulong sc_iaoq[2]; 6877 abi_ulong sc_sar; 6878 }; 6879 6880 struct target_ucontext { 6881 abi_uint tuc_flags; 6882 abi_ulong tuc_link; 6883 target_stack_t tuc_stack; 6884 abi_uint pad[1]; 6885 struct target_sigcontext tuc_mcontext; 6886 target_sigset_t tuc_sigmask; 6887 }; 6888 6889 struct target_rt_sigframe { 6890 abi_uint tramp[9]; 6891 target_siginfo_t info; 6892 struct target_ucontext uc; 6893 /* hidden location of upper halves of pa2.0 64-bit gregs */ 6894 }; 6895 6896 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env) 6897 { 6898 int flags = 0; 6899 int i; 6900 6901 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */ 6902 6903 if (env->iaoq_f < TARGET_PAGE_SIZE) { 6904 /* In the gateway page, executing a syscall. */ 6905 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */ 6906 __put_user(env->gr[31], &sc->sc_iaoq[0]); 6907 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]); 6908 } else { 6909 __put_user(env->iaoq_f, &sc->sc_iaoq[0]); 6910 __put_user(env->iaoq_b, &sc->sc_iaoq[1]); 6911 } 6912 __put_user(0, &sc->sc_iasq[0]); 6913 __put_user(0, &sc->sc_iasq[1]); 6914 __put_user(flags, &sc->sc_flags); 6915 6916 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]); 6917 for (i = 1; i < 32; ++i) { 6918 __put_user(env->gr[i], &sc->sc_gr[i]); 6919 } 6920 6921 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]); 6922 for (i = 1; i < 32; ++i) { 6923 __put_user(env->fr[i], &sc->sc_fr[i]); 6924 } 6925 6926 __put_user(env->cr[CR_SAR], &sc->sc_sar); 6927 } 6928 6929 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc) 6930 { 6931 target_ulong psw; 6932 int i; 6933 6934 __get_user(psw, &sc->sc_gr[0]); 6935 cpu_hppa_put_psw(env, psw); 6936 6937 for (i = 1; i < 32; ++i) { 6938 __get_user(env->gr[i], &sc->sc_gr[i]); 6939 } 6940 for (i = 0; i < 32; ++i) { 6941 __get_user(env->fr[i], &sc->sc_fr[i]); 6942 } 6943 cpu_hppa_loaded_fr0(env); 6944 6945 __get_user(env->iaoq_f, &sc->sc_iaoq[0]); 6946 __get_user(env->iaoq_b, &sc->sc_iaoq[1]); 6947 __get_user(env->cr[CR_SAR], &sc->sc_sar); 6948 } 6949 6950 /* No, this doesn't look right, but it's copied straight from the kernel. */ 6951 #define PARISC_RT_SIGFRAME_SIZE32 \ 6952 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64) 6953 6954 static void setup_rt_frame(int sig, struct target_sigaction *ka, 6955 target_siginfo_t *info, 6956 target_sigset_t *set, CPUArchState *env) 6957 { 6958 abi_ulong frame_addr, sp, haddr; 6959 struct target_rt_sigframe *frame; 6960 int i; 6961 6962 sp = env->gr[30]; 6963 if (ka->sa_flags & TARGET_SA_ONSTACK) { 6964 if (sas_ss_flags(sp) == 0) { 6965 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f; 6966 } 6967 } 6968 frame_addr = QEMU_ALIGN_UP(sp, 64); 6969 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32; 6970 6971 trace_user_setup_rt_frame(env, frame_addr); 6972 6973 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 6974 goto give_sigsegv; 6975 } 6976 6977 tswap_siginfo(&frame->info, info); 6978 frame->uc.tuc_flags = 0; 6979 frame->uc.tuc_link = 0; 6980 6981 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); 6982 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), 6983 &frame->uc.tuc_stack.ss_flags); 6984 __put_user(target_sigaltstack_used.ss_size, 6985 &frame->uc.tuc_stack.ss_size); 6986 6987 for (i = 0; i < TARGET_NSIG_WORDS; i++) { 6988 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 6989 } 6990 6991 setup_sigcontext(&frame->uc.tuc_mcontext, env); 6992 6993 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */ 6994 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */ 6995 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */ 6996 __put_user(0x08000240, frame->tramp + 3); /* nop */ 6997 6998 unlock_user_struct(frame, frame_addr, 1); 6999 7000 env->gr[2] = h2g(frame->tramp); 7001 env->gr[30] = sp; 7002 env->gr[26] = sig; 7003 env->gr[25] = h2g(&frame->info); 7004 env->gr[24] = h2g(&frame->uc); 7005 7006 haddr = ka->_sa_handler; 7007 if (haddr & 2) { 7008 /* Function descriptor. */ 7009 target_ulong *fdesc, dest; 7010 7011 haddr &= -4; 7012 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) { 7013 goto give_sigsegv; 7014 } 7015 __get_user(dest, fdesc); 7016 __get_user(env->gr[19], fdesc + 1); 7017 unlock_user_struct(fdesc, haddr, 1); 7018 haddr = dest; 7019 } 7020 env->iaoq_f = haddr; 7021 env->iaoq_b = haddr + 4; 7022 return; 7023 7024 give_sigsegv: 7025 force_sigsegv(sig); 7026 } 7027 7028 long do_rt_sigreturn(CPUArchState *env) 7029 { 7030 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32; 7031 struct target_rt_sigframe *frame; 7032 sigset_t set; 7033 7034 trace_user_do_rt_sigreturn(env, frame_addr); 7035 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 7036 goto badframe; 7037 } 7038 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 7039 set_sigmask(&set); 7040 7041 restore_sigcontext(env, &frame->uc.tuc_mcontext); 7042 unlock_user_struct(frame, frame_addr, 0); 7043 7044 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, 7045 uc.tuc_stack), 7046 0, env->gr[30]) == -EFAULT) { 7047 goto badframe; 7048 } 7049 7050 unlock_user_struct(frame, frame_addr, 0); 7051 return -TARGET_QEMU_ESIGRETURN; 7052 7053 badframe: 7054 force_sig(TARGET_SIGSEGV); 7055 return -TARGET_QEMU_ESIGRETURN; 7056 } 7057 7058 #elif defined(TARGET_XTENSA) 7059 7060 struct target_sigcontext { 7061 abi_ulong sc_pc; 7062 abi_ulong sc_ps; 7063 abi_ulong sc_lbeg; 7064 abi_ulong sc_lend; 7065 abi_ulong sc_lcount; 7066 abi_ulong sc_sar; 7067 abi_ulong sc_acclo; 7068 abi_ulong sc_acchi; 7069 abi_ulong sc_a[16]; 7070 abi_ulong sc_xtregs; 7071 }; 7072 7073 struct target_ucontext { 7074 abi_ulong tuc_flags; 7075 abi_ulong tuc_link; 7076 target_stack_t tuc_stack; 7077 struct target_sigcontext tuc_mcontext; 7078 target_sigset_t tuc_sigmask; 7079 }; 7080 7081 struct target_rt_sigframe { 7082 target_siginfo_t info; 7083 struct target_ucontext uc; 7084 /* TODO: xtregs */ 7085 uint8_t retcode[6]; 7086 abi_ulong window[4]; 7087 }; 7088 7089 static abi_ulong get_sigframe(struct target_sigaction *sa, 7090 CPUXtensaState *env, 7091 unsigned long framesize) 7092 { 7093 abi_ulong sp = env->regs[1]; 7094 7095 /* This is the X/Open sanctioned signal stack switching. */ 7096 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) { 7097 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; 7098 } 7099 return (sp - framesize) & -16; 7100 } 7101 7102 static int flush_window_regs(CPUXtensaState *env) 7103 { 7104 uint32_t wb = env->sregs[WINDOW_BASE]; 7105 uint32_t ws = xtensa_replicate_windowstart(env) >> (wb + 1); 7106 unsigned d = ctz32(ws) + 1; 7107 unsigned i; 7108 int ret = 0; 7109 7110 for (i = d; i < env->config->nareg / 4; i += d) { 7111 uint32_t ssp, osp; 7112 unsigned j; 7113 7114 ws >>= d; 7115 xtensa_rotate_window(env, d); 7116 7117 if (ws & 0x1) { 7118 ssp = env->regs[5]; 7119 d = 1; 7120 } else if (ws & 0x2) { 7121 ssp = env->regs[9]; 7122 ret |= get_user_ual(osp, env->regs[1] - 12); 7123 osp -= 32; 7124 d = 2; 7125 } else if (ws & 0x4) { 7126 ssp = env->regs[13]; 7127 ret |= get_user_ual(osp, env->regs[1] - 12); 7128 osp -= 48; 7129 d = 3; 7130 } else { 7131 g_assert_not_reached(); 7132 } 7133 7134 for (j = 0; j < 4; ++j) { 7135 ret |= put_user_ual(env->regs[j], ssp - 16 + j * 4); 7136 } 7137 for (j = 4; j < d * 4; ++j) { 7138 ret |= put_user_ual(env->regs[j], osp - 16 + j * 4); 7139 } 7140 } 7141 xtensa_rotate_window(env, d); 7142 g_assert(env->sregs[WINDOW_BASE] == wb); 7143 return ret == 0; 7144 } 7145 7146 static int setup_sigcontext(struct target_rt_sigframe *frame, 7147 CPUXtensaState *env) 7148 { 7149 struct target_sigcontext *sc = &frame->uc.tuc_mcontext; 7150 int i; 7151 7152 __put_user(env->pc, &sc->sc_pc); 7153 __put_user(env->sregs[PS], &sc->sc_ps); 7154 __put_user(env->sregs[LBEG], &sc->sc_lbeg); 7155 __put_user(env->sregs[LEND], &sc->sc_lend); 7156 __put_user(env->sregs[LCOUNT], &sc->sc_lcount); 7157 if (!flush_window_regs(env)) { 7158 return 0; 7159 } 7160 for (i = 0; i < 16; ++i) { 7161 __put_user(env->regs[i], sc->sc_a + i); 7162 } 7163 __put_user(0, &sc->sc_xtregs); 7164 /* TODO: xtregs */ 7165 return 1; 7166 } 7167 7168 static void setup_rt_frame(int sig, struct target_sigaction *ka, 7169 target_siginfo_t *info, 7170 target_sigset_t *set, CPUXtensaState *env) 7171 { 7172 abi_ulong frame_addr; 7173 struct target_rt_sigframe *frame; 7174 uint32_t ra; 7175 int i; 7176 7177 frame_addr = get_sigframe(ka, env, sizeof(*frame)); 7178 trace_user_setup_rt_frame(env, frame_addr); 7179 7180 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { 7181 goto give_sigsegv; 7182 } 7183 7184 if (ka->sa_flags & SA_SIGINFO) { 7185 tswap_siginfo(&frame->info, info); 7186 } 7187 7188 __put_user(0, &frame->uc.tuc_flags); 7189 __put_user(0, &frame->uc.tuc_link); 7190 __put_user(target_sigaltstack_used.ss_sp, 7191 &frame->uc.tuc_stack.ss_sp); 7192 __put_user(sas_ss_flags(env->regs[1]), 7193 &frame->uc.tuc_stack.ss_flags); 7194 __put_user(target_sigaltstack_used.ss_size, 7195 &frame->uc.tuc_stack.ss_size); 7196 if (!setup_sigcontext(frame, env)) { 7197 unlock_user_struct(frame, frame_addr, 0); 7198 goto give_sigsegv; 7199 } 7200 for (i = 0; i < TARGET_NSIG_WORDS; ++i) { 7201 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); 7202 } 7203 7204 if (ka->sa_flags & TARGET_SA_RESTORER) { 7205 ra = ka->sa_restorer; 7206 } else { 7207 ra = frame_addr + offsetof(struct target_rt_sigframe, retcode); 7208 #ifdef TARGET_WORDS_BIGENDIAN 7209 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ 7210 __put_user(0x22, &frame->retcode[0]); 7211 __put_user(0x0a, &frame->retcode[1]); 7212 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]); 7213 /* Generate instruction: SYSCALL */ 7214 __put_user(0x00, &frame->retcode[3]); 7215 __put_user(0x05, &frame->retcode[4]); 7216 __put_user(0x00, &frame->retcode[5]); 7217 #else 7218 /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ 7219 __put_user(0x22, &frame->retcode[0]); 7220 __put_user(0xa0, &frame->retcode[1]); 7221 __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]); 7222 /* Generate instruction: SYSCALL */ 7223 __put_user(0x00, &frame->retcode[3]); 7224 __put_user(0x50, &frame->retcode[4]); 7225 __put_user(0x00, &frame->retcode[5]); 7226 #endif 7227 } 7228 env->sregs[PS] = PS_UM | (3 << PS_RING_SHIFT); 7229 if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER)) { 7230 env->sregs[PS] |= PS_WOE | (1 << PS_CALLINC_SHIFT); 7231 } 7232 memset(env->regs, 0, sizeof(env->regs)); 7233 env->pc = ka->_sa_handler; 7234 env->regs[1] = frame_addr; 7235 env->sregs[WINDOW_BASE] = 0; 7236 env->sregs[WINDOW_START] = 1; 7237 7238 env->regs[4] = (ra & 0x3fffffff) | 0x40000000; 7239 env->regs[6] = sig; 7240 env->regs[7] = frame_addr + offsetof(struct target_rt_sigframe, info); 7241 env->regs[8] = frame_addr + offsetof(struct target_rt_sigframe, uc); 7242 unlock_user_struct(frame, frame_addr, 1); 7243 return; 7244 7245 give_sigsegv: 7246 force_sigsegv(sig); 7247 return; 7248 } 7249 7250 static void restore_sigcontext(CPUXtensaState *env, 7251 struct target_rt_sigframe *frame) 7252 { 7253 struct target_sigcontext *sc = &frame->uc.tuc_mcontext; 7254 uint32_t ps; 7255 int i; 7256 7257 __get_user(env->pc, &sc->sc_pc); 7258 __get_user(ps, &sc->sc_ps); 7259 __get_user(env->sregs[LBEG], &sc->sc_lbeg); 7260 __get_user(env->sregs[LEND], &sc->sc_lend); 7261 __get_user(env->sregs[LCOUNT], &sc->sc_lcount); 7262 7263 env->sregs[WINDOW_BASE] = 0; 7264 env->sregs[WINDOW_START] = 1; 7265 env->sregs[PS] = deposit32(env->sregs[PS], 7266 PS_CALLINC_SHIFT, 7267 PS_CALLINC_LEN, 7268 extract32(ps, PS_CALLINC_SHIFT, 7269 PS_CALLINC_LEN)); 7270 for (i = 0; i < 16; ++i) { 7271 __get_user(env->regs[i], sc->sc_a + i); 7272 } 7273 /* TODO: xtregs */ 7274 } 7275 7276 long do_rt_sigreturn(CPUXtensaState *env) 7277 { 7278 abi_ulong frame_addr = env->regs[1]; 7279 struct target_rt_sigframe *frame; 7280 sigset_t set; 7281 7282 trace_user_do_rt_sigreturn(env, frame_addr); 7283 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { 7284 goto badframe; 7285 } 7286 target_to_host_sigset(&set, &frame->uc.tuc_sigmask); 7287 set_sigmask(&set); 7288 7289 restore_sigcontext(env, frame); 7290 7291 if (do_sigaltstack(frame_addr + 7292 offsetof(struct target_rt_sigframe, uc.tuc_stack), 7293 0, get_sp_from_cpustate(env)) == -TARGET_EFAULT) { 7294 goto badframe; 7295 } 7296 unlock_user_struct(frame, frame_addr, 0); 7297 return -TARGET_QEMU_ESIGRETURN; 7298 7299 badframe: 7300 unlock_user_struct(frame, frame_addr, 0); 7301 force_sig(TARGET_SIGSEGV); 7302 return -TARGET_QEMU_ESIGRETURN; 7303 } 7304 7305 #else 7306 #error Target needs to add support for signal handling 7307 #endif 7308 7309 static void handle_pending_signal(CPUArchState *cpu_env, int sig, 7310 struct emulated_sigtable *k) 7311 { 7312 CPUState *cpu = ENV_GET_CPU(cpu_env); 7313 abi_ulong handler; 7314 sigset_t set; 7315 target_sigset_t target_old_set; 7316 struct target_sigaction *sa; 7317 TaskState *ts = cpu->opaque; 7318 7319 trace_user_handle_signal(cpu_env, sig); 7320 /* dequeue signal */ 7321 k->pending = 0; 7322 7323 sig = gdb_handlesig(cpu, sig); 7324 if (!sig) { 7325 sa = NULL; 7326 handler = TARGET_SIG_IGN; 7327 } else { 7328 sa = &sigact_table[sig - 1]; 7329 handler = sa->_sa_handler; 7330 } 7331 7332 if (do_strace) { 7333 print_taken_signal(sig, &k->info); 7334 } 7335 7336 if (handler == TARGET_SIG_DFL) { 7337 /* default handler : ignore some signal. The other are job control or fatal */ 7338 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { 7339 kill(getpid(),SIGSTOP); 7340 } else if (sig != TARGET_SIGCHLD && 7341 sig != TARGET_SIGURG && 7342 sig != TARGET_SIGWINCH && 7343 sig != TARGET_SIGCONT) { 7344 dump_core_and_abort(sig); 7345 } 7346 } else if (handler == TARGET_SIG_IGN) { 7347 /* ignore sig */ 7348 } else if (handler == TARGET_SIG_ERR) { 7349 dump_core_and_abort(sig); 7350 } else { 7351 /* compute the blocked signals during the handler execution */ 7352 sigset_t *blocked_set; 7353 7354 target_to_host_sigset(&set, &sa->sa_mask); 7355 /* SA_NODEFER indicates that the current signal should not be 7356 blocked during the handler */ 7357 if (!(sa->sa_flags & TARGET_SA_NODEFER)) 7358 sigaddset(&set, target_to_host_signal(sig)); 7359 7360 /* save the previous blocked signal state to restore it at the 7361 end of the signal execution (see do_sigreturn) */ 7362 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask); 7363 7364 /* block signals in the handler */ 7365 blocked_set = ts->in_sigsuspend ? 7366 &ts->sigsuspend_mask : &ts->signal_mask; 7367 sigorset(&ts->signal_mask, blocked_set, &set); 7368 ts->in_sigsuspend = 0; 7369 7370 /* if the CPU is in VM86 mode, we restore the 32 bit values */ 7371 #if defined(TARGET_I386) && !defined(TARGET_X86_64) 7372 { 7373 CPUX86State *env = cpu_env; 7374 if (env->eflags & VM_MASK) 7375 save_v86_state(env); 7376 } 7377 #endif 7378 /* prepare the stack frame of the virtual CPU */ 7379 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ 7380 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \ 7381 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \ 7382 || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \ 7383 || defined(TARGET_RISCV) || defined(TARGET_XTENSA) 7384 /* These targets do not have traditional signals. */ 7385 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 7386 #else 7387 if (sa->sa_flags & TARGET_SA_SIGINFO) 7388 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); 7389 else 7390 setup_frame(sig, sa, &target_old_set, cpu_env); 7391 #endif 7392 if (sa->sa_flags & TARGET_SA_RESETHAND) { 7393 sa->_sa_handler = TARGET_SIG_DFL; 7394 } 7395 } 7396 } 7397 7398 void process_pending_signals(CPUArchState *cpu_env) 7399 { 7400 CPUState *cpu = ENV_GET_CPU(cpu_env); 7401 int sig; 7402 TaskState *ts = cpu->opaque; 7403 sigset_t set; 7404 sigset_t *blocked_set; 7405 7406 while (atomic_read(&ts->signal_pending)) { 7407 /* FIXME: This is not threadsafe. */ 7408 sigfillset(&set); 7409 sigprocmask(SIG_SETMASK, &set, 0); 7410 7411 restart_scan: 7412 sig = ts->sync_signal.pending; 7413 if (sig) { 7414 /* Synchronous signals are forced, 7415 * see force_sig_info() and callers in Linux 7416 * Note that not all of our queue_signal() calls in QEMU correspond 7417 * to force_sig_info() calls in Linux (some are send_sig_info()). 7418 * However it seems like a kernel bug to me to allow the process 7419 * to block a synchronous signal since it could then just end up 7420 * looping round and round indefinitely. 7421 */ 7422 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig]) 7423 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) { 7424 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]); 7425 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL; 7426 } 7427 7428 handle_pending_signal(cpu_env, sig, &ts->sync_signal); 7429 } 7430 7431 for (sig = 1; sig <= TARGET_NSIG; sig++) { 7432 blocked_set = ts->in_sigsuspend ? 7433 &ts->sigsuspend_mask : &ts->signal_mask; 7434 7435 if (ts->sigtab[sig - 1].pending && 7436 (!sigismember(blocked_set, 7437 target_to_host_signal_table[sig]))) { 7438 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]); 7439 /* Restart scan from the beginning, as handle_pending_signal 7440 * might have resulted in a new synchronous signal (eg SIGSEGV). 7441 */ 7442 goto restart_scan; 7443 } 7444 } 7445 7446 /* if no signal is pending, unblock signals and recheck (the act 7447 * of unblocking might cause us to take another host signal which 7448 * will set signal_pending again). 7449 */ 7450 atomic_set(&ts->signal_pending, 0); 7451 ts->in_sigsuspend = 0; 7452 set = ts->signal_mask; 7453 sigdelset(&set, SIGSEGV); 7454 sigdelset(&set, SIGBUS); 7455 sigprocmask(SIG_SETMASK, &set, 0); 7456 } 7457 ts->in_sigsuspend = 0; 7458 } 7459