/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;
/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* A valid sig is between 1 and _NSIG - 1. */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* A valid sig is between 1 and TARGET_NSIG. */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
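
/*
 * Worked example (assumed values, not from the original code): how a
 * signal number maps onto the sig[] word/bit layout used above.
 * Assuming TARGET_NSIG_BPW == 32, target signal 33 becomes signum == 32
 * after the decrement, so it lands in word 32 / 32 == 1 at bit
 * 32 % 32 == 0. Signal 1 (SIGHUP) likewise lands in word 0 at bit 0,
 * matching the kernel's 0-based sigset layout.
 */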

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for the sigprocmask function.
 * Emulates sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change the blocking status of
         * SIGKILL or SIGSTOP.
         */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
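
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * code): a syscall emulation path is expected to hand -QEMU_ERESTARTSYS
 * straight back, so that the main loop can deliver the pending signal
 * and then restart the guest syscall instruction:
 *
 *     abi_long do_guest_sigprocmask(int how, const sigset_t *set,
 *                                   sigset_t *oldset)
 *     {
 *         abi_long ret = do_sigprocmask(how, set, oldset);
 *         return ret;  // may be -QEMU_ERESTARTSYS: do not convert it,
 *                      // the main loop restarts the guest syscall
 *     }
 */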

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
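
/*
 * Worked example (assumed numbers, not from the original code): with
 * sigaltstack_used.ss_sp == 0x40000000 and ss_size == 0x2000, a handler
 * registered with TARGET_SA_ONSTACK whose interrupted sp lies outside
 * that range gets sp == 0x40002000, the top of the alternate stack,
 * since stacks grow downwards on the targets handled here. If sp is
 * already inside the range, sas_ss_flags() returns SS_ONSTACK and the
 * new frame is simply pushed below the current sp.
 */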

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}
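
/*
 * Illustrative sketch (guest-side code, not part of QEMU): the usage
 * that the validation above mirrors. A guest doing the following takes
 * the TARGET_SS_ONSTACK/0 path, and would get -TARGET_ENOMEM if
 * SIGSTKSZ were below the target's minimum:
 *
 *     stack_t ss;
 *     ss.ss_sp = malloc(SIGSTKSZ);
 *     ss.ss_size = SIGSTKSZ;
 *     ss.ss_flags = 0;
 *     sigaltstack(&ss, NULL);  // reaches target_restore_altstack()
 *                              // via do_sigaltstack() below
 */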

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make our best guess based on
         * the signal.
         */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                        | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
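
/*
 * Worked example (assumed values, not from the original code): for a
 * SIGCHLD with si_code == CLD_EXITED (1), the deposit32() above yields
 * (QEMU_SI_CHLD << 16) | 1: the low 16 bits keep the kernel's si_code,
 * the high 16 bits carry QEMU's private marker. tswap_siginfo() below
 * undoes this with extract32(si_code, 16, 16) for the marker and
 * sextract32(si_code, 0, 16) to sign-extend the real si_code before it
 * reaches the guest.
 */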

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
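
/*
 * Illustrative sketch (guest-side code, not part of QEMU): the call
 * this conversion serves. Only the _rt union member is meaningful:
 *
 *     siginfo_t si = { 0 };
 *     si.si_signo = SIGRTMIN;
 *     si.si_code = SI_QUEUE;
 *     si.si_value.sival_int = 42;
 *     syscall(SYS_rt_sigqueueinfo, getpid(), SIGRTMIN, &si);
 */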

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* Returns 1 if the given signal should dump core if not handled. */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc keeps at least
     * the lowest two RT signals for internal use, which is why SIGRTMIN
     * (34) is generally greater than __SIGRTMIN (32), so the uppermost
     * target RT signals may have no host signal to map to; probably
     * nobody is using those anyway.  To fix this properly we would need
     * to do manual signal delivery multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}
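
/*
 * Worked example (assumed host values): with host SIGRTMIN == 34,
 * SIGRTMAX == 64, TARGET_SIGRTMIN == 32, and TARGET_NSIG == 64, the
 * first loop above maps host 34 -> target 32, host 35 -> target 33,
 * ..., host 64 -> target 62. Target signals 63 and 64 then have no
 * host signal to ride on, keep the _NSIG poison value in
 * target_to_host_signal_table, and are what the trace event counts.
 */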

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* Abort execution with a signal. */
static G_NORETURN
void dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; we don't
         * want a coredump of QEMU itself.
         */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
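
/*
 * Illustrative sketch (parent-side code, not part of QEMU): why dying
 * by the signal matters. A parent such as a shell then observes the
 * emulated process exactly as it would a native one:
 *
 *     int status;
 *     waitpid(pid, &status, 0);
 *     if (WIFSIGNALED(status)) {
 *         int sig = WTERMSIG(status);  // the uncaught signal
 *     }
 *
 * A plain exit(-sig) could not produce this: the exit status is
 * truncated to eight bits, and the "killed by signal" state cannot be
 * forged from user space.
 */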

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* Signal that a new signal is pending. */
    qatomic_set(&ts->signal_pending, 1);
}


/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}
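
/*
 * Sketch of the race this closes (pseudocode only; the real code is
 * per-host assembly in the safe-syscall.inc.S files):
 *
 *     safe_syscall_start:
 *         if (signal_pending) goto return_ERESTARTSYS;  // re-checked
 *         syscall                   // last insn before the end label
 *     safe_syscall_end:
 *
 * If the signal arrived with pc strictly inside (start, end), the
 * syscall instruction has not been executed yet, so it is safe to
 * rewind to safe_syscall_start and let the pending-signal check fire
 * instead of blocking in the kernel with a signal already queued.
 */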

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask = host_signal_mask(uc);

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            sigprocmask(SIG_SETMASK, sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered.
     * We can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * other data on the struct.
     */
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
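
/*
 * Size note (assumed host values): on a 64-bit Linux host the kernel
 * sigset_t embedded in the ucontext is _NSIG / 8 == 8 bytes, while
 * glibc's sigset_t is 128 bytes. SIGSET_T_SIZE must match the kernel
 * size, which is why the memset() above is used rather than calling
 * sigfillset() on the same pointer.
 */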

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos. */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here would abort them, whereas it is harmless for
             * the signal simply not to be available later.
             * This is the case for golang,
             *   see https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default handler: some signals are ignored; the others are
         * either job control or fatal.
         */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced; see force_sig_info() and
             * its callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU
             * correspond to force_sig_info() calls in Linux (some are
             * send_sig_info()).  However it seems like a kernel bug to me
             * to allow the process to block a synchronous signal, since
             * it could then just end up looping round and round
             * indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart the scan from the beginning, as
                 * handle_pending_signal might have resulted in a new
                 * synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal, which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correctly sized sigsets. */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}
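
/*
 * Illustrative sketch (hypothetical caller, following the shape of the
 * rt_sigsuspend emulation in linux-user/syscall.c; helper names are
 * assumptions): on success the converted mask is handed to a
 * safe-syscall wrapper, and ts->in_sigsuspend lets
 * process_pending_signals() restore the original mask afterwards:
 *
 *     sigset_t *set;
 *     int ret = process_sigsuspend_mask(&set, arg1, arg2);
 *     if (ret != 0) {
 *         return ret;              // -TARGET_EINVAL or -TARGET_EFAULT
 *     }
 *     ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
 *     finish_sigsuspend_mask(ret); // assumed companion helper
 */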
1226