/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);


/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return (set->sig[signum / TARGET_NSIG_BPW] & mask) != 0;
}
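
/*
 * Illustration only, not used at runtime: assuming TARGET_NSIG_BPW == 32
 * (a 32-bit abi_ulong) and TARGET_SIGUSR1 == 10 as on most targets,
 * TARGET_SIGUSR1 lives at bit 9 of set->sig[0], while signal 33 would
 * be bit 0 of set->sig[1]:
 *
 *   word = (signum - 1) / TARGET_NSIG_BPW;
 *   bit  = (signum - 1) % TARGET_NSIG_BPW;
 */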

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
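
/*
 * Typical caller pattern (a sketch, mirroring do_sigprocmask() and
 * do_sigaction() below): any path that mutates guest signal state
 * blocks host signals first and asks for a syscall restart if one
 * was already pending:
 *
 *   if (block_signals()) {
 *       return -TARGET_ERESTARTSYS;
 *   }
 *   ... update ts->signal_mask or sigact_table ...
 */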

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}
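
/*
 * Illustrative example with hypothetical values: after a guest
 * sigaltstack() call with ss_sp = 0x40000000 and ss_size = 0x4000,
 * target_sigsp() above hands an SA_ONSTACK handler a stack pointer of
 * 0x40004000, the high end of the region, from which the frame is then
 * built downwards on a downward-growing target stack.
 */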

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
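
/*
 * Layout of the staged si_code built above (QEMU-internal only, never
 * seen by the guest):
 *
 *   bits 31..16  QEMU_SI_* type tag (which union member is valid)
 *   bits 15..0   the real si_code value
 *
 * tswap_siginfo() below recovers the tag with extract32() and
 * sign-extends the low half back into the guest-visible si_code.
 */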

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default. */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals. */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}
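
/*
 * Together, fatal_signal() and core_dump_signal() mirror the kernel's
 * default dispositions (see signal(7)): default-ignored and job
 * control signals are left alone, everything else is default-fatal
 * and may additionally dump core.
 */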

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc itself uses
     * only the lowest couple of RT signals internally, and probably
     * nobody uses the upper ones; that is why SIGRTMIN (34) is
     * generally greater than __SIGRTMIN (32).
     * To fix this properly we would need to do manual signal delivery
     * multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}
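
/*
 * Worked example (host values are glibc-dependent; assume the common
 * SIGRTMIN == 34 and SIGRTMAX == 64, with TARGET_SIGRTMIN == 32 and
 * TARGET_NSIG == 64): host signal 34 maps to target 32, host 35 to
 * target 33, and so on up to host 64 mapping to target 62.  Target
 * signals 63 and 64 then get no host signal at all and stay poisoned
 * as _NSIG in target_to_host_signal_table[].
 */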

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals. */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait
     * for it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those to it first. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered.
     * We can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop.  Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on.  Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
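
/*
 * Delivery flow, in summary: host_signal_handler() above only records
 * the signal in ts->sigtab[] and kicks the CPU loop via cpu_exit();
 * the guest-visible part (stack frame setup, signal mask updates)
 * happens later in process_pending_signals(), safely outside host
 * signal context.
 */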

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here because some programs try to
             * register a handler for every possible RT signal even if they
             * don't need it.  Returning an error would abort them, whereas
             * it is harmless for the signal simply not to be available
             * later.  This is the case for golang; see
             * https://github.com/golang/go/issues/33746.  So we silently
             * ignore the failure.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
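
/*
 * Resulting host dispositions, summarizing the code above:
 *
 *   guest SIG_IGN                    -> host SIG_IGN
 *   guest SIG_DFL, default-fatal     -> host_signal_handler
 *   guest SIG_DFL, default-ignored   -> host SIG_DFL
 *   guest handler installed          -> host_signal_handler
 *
 * SIGSEGV and SIGBUS are never touched here; they keep the
 * host_signal_handler installed at signal_init() time so that
 * exceptions raised by guest code are always caught.
 */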

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: some signals are ignored by default, the
           others are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced; see force_sig_info() and
             * callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU
             * correspond to force_sig_info() calls in Linux (some are
             * send_sig_info()).  However it seems like a kernel bug to
             * allow the process to block a synchronous signal, since it
             * could then just end up looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}