/*
 * xref: /openbmc/qemu/linux-user/signal.c
 * (revision 2113aed687cb0b84ad512c440c1edf6eea8fcde2)
 */
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "strace.h"
#include "trace.h"
#include "signal-common.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
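
/*
 * Worked example of the bit arithmetic above (an illustrative sketch,
 * not part of the original file): assuming a 64-bit abi_ulong, so
 * TARGET_NSIG_BPW == 64, target signal 35 (zero-based 34) lives in
 * set->sig[34 / 64] == set->sig[0] at bit position 34 % 64 == 34.
 */
#if 0
static void target_sigset_bit_demo(void)
{
    target_sigset_t set;

    target_sigemptyset(&set);
    target_sigaddset(&set, 35);     /* sets bit 34 of set.sig[0] */
    g_assert(target_sigismember(&set, 35));
    g_assert(!target_sigismember(&set, 36));
}
#endif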

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for the sigprocmask function.
 * Emulates sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns
 * -TARGET_ERESTARTSYS if a signal was already pending and the syscall
 * must be restarted, or 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
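
/*
 * Minimal sketch (an assumption, not the actual syscall.c call site) of
 * how a TARGET_NR_rt_sigprocmask emulation might drive do_sigprocmask():
 * the guest sigset is converted to host layout first, and 'host_how' is
 * assumed to have been mapped to the host SIG_* value already.  The
 * real code in syscall.c additionally locks the guest buffers and
 * handles errors.
 */
#if 0
static abi_long sigprocmask_demo(int host_how,
                                 const target_sigset_t *guest_set,
                                 target_sigset_t *guest_oldset)
{
    sigset_t set, oldset;
    abi_long ret;

    target_to_host_sigset(&set, guest_set);         /* guest -> host layout */
    ret = do_sigprocmask(host_how, &set, &oldset);  /* may be -TARGET_ERESTARTSYS */
    if (!ret && guest_oldset) {
        host_to_target_sigset(guest_oldset, &oldset);
    }
    return ret;
}
#endif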

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
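
/*
 * Illustrative sketch (hypothetical; each target's real get_sigframe()
 * differs in layout and alignment) of how per-arch frame setup code
 * typically uses target_sigsp(): start from the current guest SP,
 * switch to the sigaltstack when SA_ONSTACK requests it, then carve
 * out an aligned frame.
 */
#if 0
static abi_ulong get_sigframe_demo(struct target_sigaction *ka,
                                   CPUArchState *env, size_t frame_size)
{
    abi_ulong sp = target_sigsp(get_sp_from_cpustate(env), ka);

    /* 16-byte alignment here is an assumption; the real value is
     * ABI-dependent. */
    return (sp - frame_size) & ~(abi_ulong)15;
}
#endif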

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}
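
/*
 * Guest-visible effect (illustrative guest-side code, not QEMU code):
 * a sigaltstack() call like the one below is funneled through
 * do_sigaltstack() into target_restore_altstack() above, so the guest
 * observes exactly these checks: EPERM while running on the old stack,
 * ENOMEM when ss_size is below the minimum.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static void install_altstack_demo(void)
{
    stack_t ss;

    ss.ss_sp = malloc(SIGSTKSZ);
    ss.ss_size = SIGSTKSZ;
    ss.ss_flags = 0;            /* 0 or SS_ONSTACK: enable the stack */
    sigaltstack(&ss, NULL);
}
#endif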

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
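
/*
 * Worked example of the si_code packing above (illustrative only):
 * deposit32(si_code, 16, 16, si_type) stores the QEMU_SI_* marker in
 * the top 16 bits of si_code, and tswap_siginfo() below recovers both
 * halves with extract32()/sextract32().
 */
#if 0
static void si_code_packing_demo(void)
{
    /* e.g. si_code == SI_USER (0) tagged with QEMU_SI_KILL */
    int packed = deposit32(SI_USER, 16, 16, QEMU_SI_KILL);

    g_assert(extract32(packed, 16, 16) == QEMU_SI_KILL);
    g_assert(sextract32(packed, 0, 16) == SI_USER);
}
#endif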

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if the given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc at least uses
     * only the lower 2 rt signals and probably nobody's using the upper
     * ones; that is why SIGRTMIN (34) is generally greater than
     * __SIGRTMIN (32).  To fix this properly we would need to do manual
     * signal delivery multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}
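
/*
 * Worked example of the rt-signal mapping above (numbers assume a glibc
 * host where SIGRTMIN is 34, because glibc reserves the first two
 * kernel rt signals, and a target with TARGET_SIGRTMIN == 32 and
 * TARGET_NSIG == 64): host 34 maps to target 32, host 35 to target 33,
 * and so on until TARGET_NSIG is reached; target rt signals with no
 * host counterpart stay poisoned to _NSIG and are never delivered.
 */
#if 0
static void rt_mapping_demo(void)
{
    /* assumes the glibc SIGRTMIN == 34 layout described above */
    g_assert(host_to_target_signal(SIGRTMIN) == TARGET_SIGRTMIN);
    g_assert(target_to_host_signal(TARGET_SIGRTMIN + 1) == SIGRTMIN + 1);
}
#endif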

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; we don't
         * want a core dump of QEMU itself. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here we install the
     * default signal handler, send ourselves the signal and wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
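
/*
 * Sketch (hypothetical; the real callers live in each target's
 * cpu_loop() and in syscall emulation) of raising a synchronous guest
 * SIGSEGV for a faulting address via queue_signal(), using the
 * QEMU_SI_FAULT marker so that tswap_siginfo() copies the _sigfault
 * fields.
 */
#if 0
static void raise_segv_demo(CPUArchState *env, abi_ulong fault_addr)
{
    target_siginfo_t info = { 0 };

    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    info.si_code = TARGET_SEGV_MAPERR;  /* address not mapped */
    info._sifields._sigfault._addr = fault_addr;
    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
#endif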

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try
             * to register a handler for all possible rt signals even if
             * they don't need it.  An error here would abort them,
             * whereas it is harmless for the signal to be unavailable
             * later.  This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * so we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
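
/*
 * From the guest's point of view (illustrative guest-side program, not
 * QEMU code): an rt_sigaction syscall like the one generated below ends
 * up in do_sigaction() above, the handler is recorded in sigact_table[],
 * and host_signal_handler() is installed for the corresponding host
 * signal so delivery can be routed back to the guest handler.
 */
#if 0
#include <signal.h>
#include <string.h>

static void on_usr1(int sig, siginfo_t *si, void *ucontext)
{
    /* runs on the guest stack frame built by setup_rt_frame() */
}

int main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = on_usr1;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);
    raise(SIGUSR1);
    return 0;
}
#endif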

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default handler: some signals are ignored, the others are
         * either job control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced;
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU
             * correspond to force_sig_info() calls in Linux (some are
             * send_sig_info()).  However, it seems like a kernel bug to
             * allow the process to block a synchronous signal, since it
             * could then just end up looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}