xref: /openbmc/qemu/linux-user/signal.c (revision 6193344f)
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
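
/*
 * Illustration (typical glibc values on Linux; other libcs and some
 * architectures differ): __SIGRTMAX is 64 and _NSIG is 65, which is
 * exactly the relationship the build-time assertion above verifies.
 */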
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
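
/*
 * Worked example of the bit arithmetic above (pure arithmetic, nothing
 * assumed beyond the word width): with TARGET_NSIG_BPW == 32, target
 * signal 33 decrements to 32 and lands in set->sig[1] as bit 0; with
 * TARGET_NSIG_BPW == 64 the same signal stays in set->sig[0] as bit 32.
 */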

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
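
/*
 * Typical call pattern (informal sketch; compare do_sigprocmask()
 * below and its callers):
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     ... update guest signal state ...
 *
 * i.e. if a signal was already pending the syscall is restarted so
 * that the guest signal gets delivered first.
 */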

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
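
/*
 * Informal note: on_sig_stack() relies on unsigned wraparound, so the
 * single comparison rejects both addresses below ss_sp and addresses at
 * or beyond ss_sp + ss_size.  target_sigsp() then returns
 * ss_sp + ss_size, the top of the alternate stack, from which a target
 * with the usual grows-down stack convention builds the signal frame.
 */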

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
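
/*
 * Worked example of the packing above: for a signal sent with kill(),
 * si_code is SI_USER (0), so deposit32(0, 16, 16, si_type) is simply
 * si_type << 16 with QEMU_SI_KILL as the si_type.  tswap_siginfo()
 * below undoes this with extract32()/sextract32(), and only the
 * sign-extended low 16 bits of si_code ever reach the guest.
 */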

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc at least uses
     * only the lower 2 rt signals and probably nobody's using the upper
     * ones; this is why SIGRTMIN (34) is generally greater than
     * __SIGRTMIN (32).  To fix this properly we would need to do manual
     * signal delivery multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}
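
/*
 * Example of the resulting mapping (assuming typical glibc values,
 * where SIGRTMIN is 34 because 32/33 are reserved for threading, and a
 * target with TARGET_SIGRTMIN == 32): host 34 maps to target 32, host
 * 35 to target 33, and so on; the topmost target rt signals then have
 * no host counterpart and stay poisoned as _NSIG in
 * target_to_host_signal_table.
 */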

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}
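
/*
 * Illustrative use (a hedged sketch; per-target cpu_loop() code
 * contains calls of this shape): an unaligned data access could be
 * reported as
 *
 *     force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
 *
 * delivering the signal number, the si_code describing the fault, and
 * the faulting address to the guest's siginfo.
 */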

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
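
/*
 * Informal note: queue_signal() fills only the single sync_signal
 * slot, so it is meant for synchronous, CPU-generated signals (see
 * force_sig() and force_sig_fault() above, which are thin wrappers
 * around it).  Asynchronous signals from the host are instead queued
 * into ts->sigtab[] by host_signal_handler() below.
 */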

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for every possible rt signal even if they
             * don't need it.  An error here could abort them, whereas the
             * signal simply not being available later causes no real
             * problem.  This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: some signals are ignored; the others are
           either job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
1103