xref: /openbmc/qemu/linux-user/signal.c (revision 7c08eefc)
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "gdbstub/user.h"
#include "exec/page-protection.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"
#include "tcg/tcg.h"

/* target_siginfo_t must fit in gdbstub's siginfo save area. */
QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
        MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}
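
/*
 * A sketch of the mapping contract above: out-of-range inputs convert
 * to values that are out of range on the other side, so a conversion
 * can never produce a spuriously valid signal.  Assuming a glibc host
 * where _NSIG is 65:
 *
 *     host_to_target_signal(SIGINT)          == TARGET_SIGINT
 *     host_to_target_signal(_NSIG)           == TARGET_NSIG + 1
 *     target_to_host_signal(TARGET_NSIG + 1) == _NSIG
 */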

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
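
/*
 * Worked example of the bit layout used by the two helpers above:
 * signal numbers are 1-based while bits are 0-based, hence the
 * signum-- on entry.  Assuming TARGET_NSIG_BPW is 32, signal 1 lands
 * in sig[0] bit 0, signal 32 in sig[0] bit 31, and signal 33 in
 * sig[1] bit 0, matching the packing of the kernel's sigset_t words.
 */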

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
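
/*
 * Typical calling pattern for block_signals(), as used by
 * do_sigprocmask() and do_sigaction() below: block first, then back
 * out with -QEMU_ERESTARTSYS if a signal was already pending, so the
 * guest syscall is restarted after the signal is delivered:
 *
 *     if (block_signals()) {
 *         return -QEMU_ERESTARTSYS;
 *     }
 *     ... modify the emulated signal state ...
 */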

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = get_task_state(thread_cpu);

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
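
/*
 * Note that ts->signal_mask is maintained as a *host* sigset here: a
 * guest sigprocmask(SIG_BLOCK, ...) reaches this function only after
 * the caller has converted the guest set with target_to_host_sigset(),
 * so the set operations above all work on host signal numbers.
 */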

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = get_task_state(thread_cpu);

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = get_task_state(thread_cpu);

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
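
/*
 * Example of the stack selection above, with illustrative addresses:
 * given sigaltstack(ss_sp = 0x40000000, ss_size = 0x8000) and a
 * handler registered with SA_ONSTACK, a signal taken on the normal
 * stack sees sas_ss_flags(sp) == 0 and builds its frame downward from
 * 0x40008000; a nested signal already running on the alternate stack
 * sees SS_ONSTACK and keeps the current sp instead.
 */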

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}
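
/*
 * The checks above mirror the kernel's do_sigaltstack(): modifying the
 * alternate stack while running on it yields EPERM, an unrecognized
 * ss_flags value yields EINVAL, and enabling a stack smaller than the
 * minimum yields ENOMEM.  For example, a guest that calls sigaltstack()
 * from inside a handler currently executing on the alternate stack gets
 * -TARGET_EPERM no matter what new values it passes.
 */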

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                        | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
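
/*
 * Packing sketch for the deposit32() above: the guessed si_type is
 * stored in bits 16..31 while the 16-bit kernel si_code stays in bits
 * 0..15.  For instance, a SIGCHLD with si_code CLD_EXITED travels as
 * (QEMU_SI_CHLD << 16) | CLD_EXITED until tswap_siginfo() recovers the
 * two halves with extract32()/sextract32() and writes only the low
 * half to the guest.
 */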

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int hsig, tsig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  Glibc uses the lower 2
     * RT signals and (hopefully) nobody uses the upper ones.
     * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
     * To fix this properly we would need to do manual signal delivery
     * multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     *
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort.  When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn.  If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    hsig = SIGRTMIN;
    host_to_target_signal_table[SIGABRT] = 0;
    host_to_target_signal_table[hsig++] = TARGET_SIGABRT;

    for (tsig = TARGET_SIGRTMIN;
         hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
         hsig++, tsig++) {
        host_to_target_signal_table[hsig] = tsig;
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            assert(target_to_host_signal_table[tsig] == 0);
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}
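
/*
 * Resulting layout, sketched for a typical glibc host where SIGRTMIN
 * is 34 and __SIGRTMIN is 32 (the exact numbers vary with the libc):
 *
 *     host 1..31          <-> the matching standard target signals
 *     host SIGRTMIN       <-  guest TARGET_SIGABRT (remapped abort)
 *     host SIGRTMIN+1...  <-> TARGET_SIGRTMIN... while both ranges last
 *     host SIGABRT         -> guest TARGET_SIGABRT (one way only)
 *
 * Everything left unassigned maps out of range and is counted as
 * unsupported by trace_signal_table_init().
 */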

void signal_init(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default.  For any target signals that have no host
     * mapping, set to ignore.  For all signals that would dump core
     * (core_dump_signal()), install our host signal handler so that we
     * may invoke dump_core_and_abort().  This includes SIGSEGV and
     * SIGBUS, which also need our signal handler for paging and
     * exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal.  Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}
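
/*
 * Example of the effect: dying from SIGSEGV this way lets a waiting
 * parent see WIFSIGNALED() with WTERMSIG() == SIGSEGV, and a shell
 * reports the usual 128 + 11 = 139 status -- something that a plain
 * _exit() call could not reproduce.
 */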

static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; we don't want
         * a coredump of QEMU itself. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}
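
/*
 * Usage sketch: the force_sig*() helpers above are thin wrappers that
 * fill in a target_siginfo_t and call queue_signal() with the matching
 * si_type (QEMU_SI_KILL, QEMU_SI_FAULT, ...); the deposit32() here
 * stashes that si_type in the top 16 bits of si_code, exactly as
 * host_to_target_siginfo_noswap() does for signals arriving from the
 * host.
 */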

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

static G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}

static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes past the end of the structure, trashing
     * adjacent data.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
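
/*
 * Flow summary for an asynchronous host signal handled above: record
 * it in ts->sigtab[], rewind out of safe-syscall if necessary, leave
 * everything except SIGSEGV/SIGBUS blocked in the interrupted
 * context's mask, and cpu_exit() so the main loop reaches
 * process_pending_signals() as soon as possible.
 */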

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.  An error here would abort them, whereas it
             * does no harm for the signal simply to be unavailable later.
             * This is the case for Go, see
             * https://github.com/golang/go/issues/33746.  So we silently
             * ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
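
/*
 * Guest-visible sketch: a guest sigaction(SIGUSR1, &act, NULL) with a
 * handler records the target handler in sigact_table[] and installs
 * host_signal_handler() for the mapped host signal; the guest handler
 * itself only runs later, when handle_pending_signal() below builds a
 * signal frame on the virtual CPU.
 */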

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_siginfo_t unswapped;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = get_task_state(cpu);

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /*
     * Write out the siginfo values byteswapped according to the target.
     * This also cleans the si_type out of si_code, making it correct for
     * the target.  We must hold on to the original unswapped copy for
     * strace below, because si_type is still required there.
     */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        unswapped = k->info;
    }
    tswap_siginfo(&k->info, &k->info);

    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &unswapped);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job control
           or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = get_task_state(cpu);
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal, since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}