xref: /openbmc/qemu/linux-user/signal.c (revision 66210a1a30f2384bb59f9dad8d769dba56dd30f1)
1  /*
2   *  Emulation of Linux signals
3   *
4   *  Copyright (c) 2003 Fabrice Bellard
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
8   *  the Free Software Foundation; either version 2 of the License, or
9   *  (at your option) any later version.
10   *
11   *  This program is distributed in the hope that it will be useful,
12   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   *  GNU General Public License for more details.
15   *
16   *  You should have received a copy of the GNU General Public License
17   *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18   */
19  #include "qemu/osdep.h"
20  #include "qemu/bitops.h"
21  #include "gdbstub/user.h"
22  #include "hw/core/tcg-cpu-ops.h"
23  
24  #include <sys/ucontext.h>
25  #include <sys/resource.h>
26  
27  #include "qemu.h"
28  #include "user-internals.h"
29  #include "strace.h"
30  #include "loader.h"
31  #include "trace.h"
32  #include "signal-common.h"
33  #include "host-signal.h"
34  #include "user/safe-syscall.h"
35  #include "tcg/tcg.h"
36  
37  /* target_siginfo_t must fit in gdbstub's siginfo save area. */
38  QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
39  
40  static struct target_sigaction sigact_table[TARGET_NSIG];
41  
42  static void host_signal_handler(int host_signum, siginfo_t *info,
43                                  void *puc);
44  
45  /* Fallback addresses into sigtramp page. */
46  abi_ulong default_sigreturn;
47  abi_ulong default_rt_sigreturn;
48  
49  /*
50   * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
51   * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
52   * Signal number 0 is reserved for use as kill(pid, 0), to test whether
53   * a process exists without sending it a signal.
54   */
55  #ifdef __SIGRTMAX
56  QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
57  #endif
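
/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): signal number 0, as described above, can be used to
 * probe for process existence without delivering anything.
 */
static inline bool example_process_exists(pid_t pid)
{
    /* kill() with signal 0 performs existence/permission checks only. */
    return kill(pid, 0) == 0 || errno == EPERM;
}
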
58  static uint8_t host_to_target_signal_table[_NSIG] = {
59  #define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
60          MAKE_SIGNAL_LIST
61  #undef MAKE_SIG_ENTRY
62  };
63  
64  static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
65  
66  /* valid sig is between 1 and _NSIG - 1 */
67  int host_to_target_signal(int sig)
68  {
69      if (sig < 1) {
70          return sig;
71      }
72      if (sig >= _NSIG) {
73          return TARGET_NSIG + 1;
74      }
75      return host_to_target_signal_table[sig];
76  }
77  
78  /* valid sig is between 1 and TARGET_NSIG */
79  int target_to_host_signal(int sig)
80  {
81      if (sig < 1) {
82          return sig;
83      }
84      if (sig > TARGET_NSIG) {
85          return _NSIG;
86      }
87      return target_to_host_signal_table[sig];
88  }
89  
90  static inline void target_sigaddset(target_sigset_t *set, int signum)
91  {
92      signum--;
93      abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
94      set->sig[signum / TARGET_NSIG_BPW] |= mask;
95  }
96  
97  static inline int target_sigismember(const target_sigset_t *set, int signum)
98  {
99      signum--;
100      abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
101      return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
102  }
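
/*
 * Usage sketch (illustrative only; the helper name is hypothetical):
 * signals are 1-based, so signal N lives at bit (N - 1) % TARGET_NSIG_BPW
 * of word (N - 1) / TARGET_NSIG_BPW.  A round trip through the helpers
 * above therefore always succeeds for a valid signal number.
 */
static inline int example_sigset_roundtrip(int signum)
{
    target_sigset_t set;

    target_sigemptyset(&set);
    target_sigaddset(&set, signum);
    return target_sigismember(&set, signum); /* 1 for 1 <= signum <= TARGET_NSIG */
}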
103  
104  void host_to_target_sigset_internal(target_sigset_t *d,
105                                      const sigset_t *s)
106  {
107      int host_sig, target_sig;
108      target_sigemptyset(d);
109      for (host_sig = 1; host_sig < _NSIG; host_sig++) {
110          target_sig = host_to_target_signal(host_sig);
111          if (target_sig < 1 || target_sig > TARGET_NSIG) {
112              continue;
113          }
114          if (sigismember(s, host_sig)) {
115              target_sigaddset(d, target_sig);
116          }
117      }
118  }
119  
120  void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
121  {
122      target_sigset_t d1;
123      int i;
124  
125      host_to_target_sigset_internal(&d1, s);
126      for (i = 0; i < TARGET_NSIG_WORDS; i++)
127          d->sig[i] = tswapal(d1.sig[i]);
128  }
129  
130  void target_to_host_sigset_internal(sigset_t *d,
131                                      const target_sigset_t *s)
132  {
133      int host_sig, target_sig;
134      sigemptyset(d);
135      for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
136          host_sig = target_to_host_signal(target_sig);
137          if (host_sig < 1 || host_sig >= _NSIG) {
138              continue;
139          }
140          if (target_sigismember(s, target_sig)) {
141              sigaddset(d, host_sig);
142          }
143      }
144  }
145  
146  void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
147  {
148      target_sigset_t s1;
149      int i;
150  
151      for (i = 0; i < TARGET_NSIG_WORDS; i++)
152          s1.sig[i] = tswapal(s->sig[i]);
153      target_to_host_sigset_internal(d, &s1);
154  }
155  
156  void host_to_target_old_sigset(abi_ulong *old_sigset,
157                                 const sigset_t *sigset)
158  {
159      target_sigset_t d;
160      host_to_target_sigset(&d, sigset);
161      *old_sigset = d.sig[0];
162  }
163  
164  void target_to_host_old_sigset(sigset_t *sigset,
165                                 const abi_ulong *old_sigset)
166  {
167      target_sigset_t d;
168      int i;
169  
170      d.sig[0] = *old_sigset;
171      for (i = 1; i < TARGET_NSIG_WORDS; i++)
172          d.sig[i] = 0;
173      target_to_host_sigset(sigset, &d);
174  }
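
/*
 * Note (added): an old-style mask holds only the first TARGET_NSIG_BPW
 * signals, so the conversions above silently drop any RT signals that
 * do not fit in sig[0].
 */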
175  
176  int block_signals(void)
177  {
178      TaskState *ts = get_task_state(thread_cpu);
179      sigset_t set;
180  
181      /* It's OK to block everything including SIGSEGV, because we won't
182       * run any further guest code before unblocking signals in
183       * process_pending_signals().
184       */
185      sigfillset(&set);
186      sigprocmask(SIG_SETMASK, &set, 0);
187  
188      return qatomic_xchg(&ts->signal_pending, 1);
189  }
190  
191  /* Wrapper for the sigprocmask function.
192   * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
193   * are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS if
194   * a signal was already pending and the syscall must be restarted, or
195   * 0 on success.
196   * If set is NULL, this is guaranteed not to fail.
197   */
198  int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
199  {
200      TaskState *ts = get_task_state(thread_cpu);
201  
202      if (oldset) {
203          *oldset = ts->signal_mask;
204      }
205  
206      if (set) {
207          int i;
208  
209          if (block_signals()) {
210              return -QEMU_ERESTARTSYS;
211          }
212  
213          switch (how) {
214          case SIG_BLOCK:
215              sigorset(&ts->signal_mask, &ts->signal_mask, set);
216              break;
217          case SIG_UNBLOCK:
218              for (i = 1; i <= NSIG; ++i) {
219                  if (sigismember(set, i)) {
220                      sigdelset(&ts->signal_mask, i);
221                  }
222              }
223              break;
224          case SIG_SETMASK:
225              ts->signal_mask = *set;
226              break;
227          default:
228              g_assert_not_reached();
229          }
230  
231          /* Silently ignore attempts to change blocking status of KILL or STOP */
232          sigdelset(&ts->signal_mask, SIGKILL);
233          sigdelset(&ts->signal_mask, SIGSTOP);
234      }
235      return 0;
236  }
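
/*
 * Caller-side sketch (illustrative): syscall emulation paths typically
 * propagate -QEMU_ERESTARTSYS so that the main loop delivers the pending
 * signal and then restarts the guest syscall, e.g.:
 *
 *     ret = do_sigprocmask(how, &set, &oldset);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         return ret;    // deliver the signal, then restart
 *     }
 */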
237  
238  /* Just set the guest's signal mask to the specified value; the
239   * caller is assumed to have called block_signals() already.
240   */
241  void set_sigmask(const sigset_t *set)
242  {
243      TaskState *ts = get_task_state(thread_cpu);
244  
245      ts->signal_mask = *set;
246  }
247  
248  /* sigaltstack management */
249  
250  int on_sig_stack(unsigned long sp)
251  {
252      TaskState *ts = get_task_state(thread_cpu);
253  
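    /*
     * Added note: this single unsigned comparison covers both bounds;
     * if sp is below ss_sp, the subtraction wraps to a huge value and
     * the test fails.
     */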
254      return (sp - ts->sigaltstack_used.ss_sp
255              < ts->sigaltstack_used.ss_size);
256  }
257  
258  int sas_ss_flags(unsigned long sp)
259  {
260      TaskState *ts = get_task_state(thread_cpu);
261  
262      return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
263              : on_sig_stack(sp) ? SS_ONSTACK : 0);
264  }
265  
266  abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
267  {
268      /*
269       * This is the X/Open sanctioned signal stack switching.
270       */
271      TaskState *ts = get_task_state(thread_cpu);
272  
273      if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
274          return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
275      }
276      return sp;
277  }
278  
279  void target_save_altstack(target_stack_t *uss, CPUArchState *env)
280  {
281      TaskState *ts = get_task_state(thread_cpu);
282  
283      __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
284      __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
285      __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
286  }
287  
288  abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
289  {
290      TaskState *ts = get_task_state(thread_cpu);
291      size_t minstacksize = TARGET_MINSIGSTKSZ;
292      target_stack_t ss;
293  
294  #if defined(TARGET_PPC64)
295      /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
296      struct image_info *image = ts->info;
297      if (get_ppc64_abi(image) > 1) {
298          minstacksize = 4096;
299      }
300  #endif
301  
302      __get_user(ss.ss_sp, &uss->ss_sp);
303      __get_user(ss.ss_size, &uss->ss_size);
304      __get_user(ss.ss_flags, &uss->ss_flags);
305  
306      if (on_sig_stack(get_sp_from_cpustate(env))) {
307          return -TARGET_EPERM;
308      }
309  
310      switch (ss.ss_flags) {
311      default:
312          return -TARGET_EINVAL;
313  
314      case TARGET_SS_DISABLE:
315          ss.ss_size = 0;
316          ss.ss_sp = 0;
317          break;
318  
319      case TARGET_SS_ONSTACK:
320      case 0:
321          if (ss.ss_size < minstacksize) {
322              return -TARGET_ENOMEM;
323          }
324          break;
325      }
326  
327      ts->sigaltstack_used.ss_sp = ss.ss_sp;
328      ts->sigaltstack_used.ss_size = ss.ss_size;
329      return 0;
330  }
331  
332  /* siginfo conversion */
333  
334  static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
335                                                   const siginfo_t *info)
336  {
337      int sig = host_to_target_signal(info->si_signo);
338      int si_code = info->si_code;
339      int si_type;
340      tinfo->si_signo = sig;
341      tinfo->si_errno = 0;
342      tinfo->si_code = info->si_code;
343  
344      /* This memset serves two purposes:
345       * (1) ensure we don't leak random junk to the guest later
346       * (2) placate false positives from gcc about fields
347       *     being used uninitialized if it chooses to inline both this
348       *     function and tswap_siginfo() into host_to_target_siginfo().
349       */
350      memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
351  
352      /* This is awkward, because we have to use a combination of
353       * the si_code and si_signo to figure out which of the union's
354       * members are valid. (Within the host kernel it is always possible
355       * to tell, but the kernel carefully avoids giving userspace the
356       * high 16 bits of si_code, so we don't have the information to
357       * do this the easy way...) We therefore make our best guess,
358       * bearing in mind that a guest can spoof most of the si_codes
359       * via rt_sigqueueinfo() if it likes.
360       *
361       * Once we have made our guess, we record it in the top 16 bits of
362       * the si_code, so that tswap_siginfo() later can use it.
363       * tswap_siginfo() will strip these top bits out before writing
364       * si_code to the guest (sign-extending the lower bits).
365       */
366  
367      switch (si_code) {
368      case SI_USER:
369      case SI_TKILL:
370      case SI_KERNEL:
371          /* Sent via kill(), tkill() or tgkill(), or directly from the kernel.
372           * These are the only unspoofable si_code values.
373           */
374          tinfo->_sifields._kill._pid = info->si_pid;
375          tinfo->_sifields._kill._uid = info->si_uid;
376          si_type = QEMU_SI_KILL;
377          break;
378      default:
379          /* Everything else is spoofable. Make best guess based on signal */
380          switch (sig) {
381          case TARGET_SIGCHLD:
382              tinfo->_sifields._sigchld._pid = info->si_pid;
383              tinfo->_sifields._sigchld._uid = info->si_uid;
384              if (si_code == CLD_EXITED)
385                  tinfo->_sifields._sigchld._status = info->si_status;
386              else
387                  tinfo->_sifields._sigchld._status
388                      = host_to_target_signal(info->si_status & 0x7f)
389                          | (info->si_status & ~0x7f);
390              tinfo->_sifields._sigchld._utime = info->si_utime;
391              tinfo->_sifields._sigchld._stime = info->si_stime;
392              si_type = QEMU_SI_CHLD;
393              break;
394          case TARGET_SIGIO:
395              tinfo->_sifields._sigpoll._band = info->si_band;
396              tinfo->_sifields._sigpoll._fd = info->si_fd;
397              si_type = QEMU_SI_POLL;
398              break;
399          default:
400              /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
401              tinfo->_sifields._rt._pid = info->si_pid;
402              tinfo->_sifields._rt._uid = info->si_uid;
403              /* XXX: potential truncation if the host pointer is 64-bit */
404              tinfo->_sifields._rt._sigval.sival_ptr
405                  = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
406              si_type = QEMU_SI_RT;
407              break;
408          }
409          break;
410      }
411  
412      tinfo->si_code = deposit32(si_code, 16, 16, si_type);
413  }
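
/*
 * Layout sketch (illustrative): after host_to_target_siginfo_noswap(),
 * si_code temporarily carries our marker:
 *
 *     bits 31..16   QEMU_SI_* type (internal bookkeeping only)
 *     bits 15..0    the real si_code
 *
 * tswap_siginfo() recovers the two halves with extract32() and
 * sextract32(), and writes only the real, sign-extended si_code to
 * the guest.
 */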
414  
415  static void tswap_siginfo(target_siginfo_t *tinfo,
416                            const target_siginfo_t *info)
417  {
418      int si_type = extract32(info->si_code, 16, 16);
419      int si_code = sextract32(info->si_code, 0, 16);
420  
421      __put_user(info->si_signo, &tinfo->si_signo);
422      __put_user(info->si_errno, &tinfo->si_errno);
423      __put_user(si_code, &tinfo->si_code);
424  
425      /* We can use our internal marker of which fields in the structure
426       * are valid, rather than duplicating the guesswork of
427       * host_to_target_siginfo_noswap() here.
428       */
429      switch (si_type) {
430      case QEMU_SI_KILL:
431          __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
432          __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
433          break;
434      case QEMU_SI_TIMER:
435          __put_user(info->_sifields._timer._timer1,
436                     &tinfo->_sifields._timer._timer1);
437          __put_user(info->_sifields._timer._timer2,
438                     &tinfo->_sifields._timer._timer2);
439          break;
440      case QEMU_SI_POLL:
441          __put_user(info->_sifields._sigpoll._band,
442                     &tinfo->_sifields._sigpoll._band);
443          __put_user(info->_sifields._sigpoll._fd,
444                     &tinfo->_sifields._sigpoll._fd);
445          break;
446      case QEMU_SI_FAULT:
447          __put_user(info->_sifields._sigfault._addr,
448                     &tinfo->_sifields._sigfault._addr);
449          break;
450      case QEMU_SI_CHLD:
451          __put_user(info->_sifields._sigchld._pid,
452                     &tinfo->_sifields._sigchld._pid);
453          __put_user(info->_sifields._sigchld._uid,
454                     &tinfo->_sifields._sigchld._uid);
455          __put_user(info->_sifields._sigchld._status,
456                     &tinfo->_sifields._sigchld._status);
457          __put_user(info->_sifields._sigchld._utime,
458                     &tinfo->_sifields._sigchld._utime);
459          __put_user(info->_sifields._sigchld._stime,
460                     &tinfo->_sifields._sigchld._stime);
461          break;
462      case QEMU_SI_RT:
463          __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
464          __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
465          __put_user(info->_sifields._rt._sigval.sival_ptr,
466                     &tinfo->_sifields._rt._sigval.sival_ptr);
467          break;
468      default:
469          g_assert_not_reached();
470      }
471  }
472  
473  void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
474  {
475      target_siginfo_t tgt_tmp;
476      host_to_target_siginfo_noswap(&tgt_tmp, info);
477      tswap_siginfo(tinfo, &tgt_tmp);
478  }
479  
480  /* XXX: we assume that only POSIX RT signals are used. */
481  /* XXX: find a solution for 64 bit (additional malloced data is needed) */
482  void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
483  {
484      /* This conversion is used only for the rt_sigqueueinfo syscall,
485       * and so we know that the _rt fields are the valid ones.
486       */
487      abi_ulong sival_ptr;
488  
489      __get_user(info->si_signo, &tinfo->si_signo);
490      __get_user(info->si_errno, &tinfo->si_errno);
491      __get_user(info->si_code, &tinfo->si_code);
492      __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
493      __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
494      __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
495      info->si_value.sival_ptr = (void *)(long)sival_ptr;
496  }
497  
498  /* Returns 1 if the given signal should dump core when not handled. */
499  static int core_dump_signal(int sig)
500  {
501      switch (sig) {
502      case TARGET_SIGABRT:
503      case TARGET_SIGFPE:
504      case TARGET_SIGILL:
505      case TARGET_SIGQUIT:
506      case TARGET_SIGSEGV:
507      case TARGET_SIGTRAP:
508      case TARGET_SIGBUS:
509          return 1;
510      default:
511          return 0;
512      }
513  }
514  
515  static void signal_table_init(void)
516  {
517      int hsig, tsig, count;
518  
519      /*
520       * Signals are supported starting from TARGET_SIGRTMIN and going up
521       * until we run out of host realtime signals.  Glibc uses the lower 2
522       * RT signals and (hopefully) nobody uses the upper ones.
523       * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
524       * To fix this properly we would need to do manual signal delivery
525       * multiplexed over a single host signal.
526   * Attempts to configure "missing" signals via sigaction will be
527       * silently ignored.
528       *
529       * Remap the target SIGABRT, so that we can distinguish host abort
530       * from guest abort.  When the guest registers a signal handler or
531       * calls raise(SIGABRT), the host will raise SIG_RTn.  If the guest
532       * arrives at dump_core_and_abort(), we will map back to host SIGABRT
533       * so that the parent (native or emulated) sees the correct signal.
534       * Finally, also map host to guest SIGABRT so that the emulated
535       * parent sees the correct mapping from wait status.
536       */
537  
538      hsig = SIGRTMIN;
539      host_to_target_signal_table[SIGABRT] = 0;
540      host_to_target_signal_table[hsig++] = TARGET_SIGABRT;
541  
542      for (tsig = TARGET_SIGRTMIN;
543           hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
544           hsig++, tsig++) {
545          host_to_target_signal_table[hsig] = tsig;
546      }
547  
548      /* Invert the mapping that has already been assigned. */
549      for (hsig = 1; hsig < _NSIG; hsig++) {
550          tsig = host_to_target_signal_table[hsig];
551          if (tsig) {
552              assert(target_to_host_signal_table[tsig] == 0);
553              target_to_host_signal_table[tsig] = hsig;
554          }
555      }
556  
557      host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;
558  
559      /* Map everything else out-of-bounds. */
560      for (hsig = 1; hsig < _NSIG; hsig++) {
561          if (host_to_target_signal_table[hsig] == 0) {
562              host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
563          }
564      }
565      for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
566          if (target_to_host_signal_table[tsig] == 0) {
567              target_to_host_signal_table[tsig] = _NSIG;
568              count++;
569          }
570      }
571  
572      trace_signal_table_init(count);
573  }
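
/*
 * Worked example (illustrative, assuming glibc's usual SIGRTMIN of 34
 * and a target with TARGET_SIGRTMIN == 32): host signal 34 carries
 * guest SIGABRT, host 35 carries guest 32, host 36 carries guest 33,
 * and so on until SIGRTMAX or TARGET_NSIG is exhausted.  Guest signals
 * left unmapped convert to _NSIG and are reported as unsupported.
 */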
574  
575  void signal_init(void)
576  {
577      TaskState *ts = get_task_state(thread_cpu);
578      struct sigaction act, oact;
579  
580      /* initialize signal conversion tables */
581      signal_table_init();
582  
583      /* Set the signal mask from the host mask. */
584      sigprocmask(0, 0, &ts->signal_mask);
585  
586      sigfillset(&act.sa_mask);
587      act.sa_flags = SA_SIGINFO;
588      act.sa_sigaction = host_signal_handler;
589  
590      /*
591       * A parent process may configure ignored signals, but all other
592       * signals are default.  For any target signals that have no host
593       * mapping, set to ignore.  For every core_dump_signal(), install our
594       * host signal handler so that we may invoke dump_core_and_abort.
595       * This includes SIGSEGV and SIGBUS, which also need our signal
596       * handler for paging and exceptions.
597       */
598      for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
599          int hsig = target_to_host_signal(tsig);
600          abi_ptr thand = TARGET_SIG_IGN;
601  
602          if (hsig >= _NSIG) {
603              continue;
604          }
605  
606          /* As we force-remap SIGABRT, we cannot probe and install in one step. */
607          if (tsig == TARGET_SIGABRT) {
608              sigaction(SIGABRT, NULL, &oact);
609              sigaction(hsig, &act, NULL);
610          } else {
611              struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
612              sigaction(hsig, iact, &oact);
613          }
614  
615          if (oact.sa_sigaction != (void *)SIG_IGN) {
616              thand = TARGET_SIG_DFL;
617          }
618          sigact_table[tsig - 1]._sa_handler = thand;
619      }
620  }
621  
622  /* Force a synchronously taken signal. The kernel force_sig() function
623   * also forces the signal to "not blocked, not ignored", but for QEMU
624   * that work is done in process_pending_signals().
625   */
626  void force_sig(int sig)
627  {
628      CPUState *cpu = thread_cpu;
629      target_siginfo_t info = {};
630  
631      info.si_signo = sig;
632      info.si_errno = 0;
633      info.si_code = TARGET_SI_KERNEL;
634      info._sifields._kill._pid = 0;
635      info._sifields._kill._uid = 0;
636      queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
637  }
638  
639  /*
640   * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
641   * 'force' part is handled in process_pending_signals().
642   */
643  void force_sig_fault(int sig, int code, abi_ulong addr)
644  {
645      CPUState *cpu = thread_cpu;
646      target_siginfo_t info = {};
647  
648      info.si_signo = sig;
649      info.si_errno = 0;
650      info.si_code = code;
651      info._sifields._sigfault._addr = addr;
652      queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
653  }
654  
655  /* Force a SIGSEGV if we couldn't write to memory trying to set
656   * up the signal frame. oldsig is the signal we were trying to handle
657   * at the point of failure.
658   */
659  #if !defined(TARGET_RISCV)
660  void force_sigsegv(int oldsig)
661  {
662      if (oldsig == SIGSEGV) {
663          /* Make sure we don't try to deliver the signal again; this will
664           * end up with handle_pending_signal() calling dump_core_and_abort().
665           */
666          sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
667      }
668      force_sig(TARGET_SIGSEGV);
669  }
670  #endif
671  
672  void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
673                             MMUAccessType access_type, bool maperr, uintptr_t ra)
674  {
675      const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
676  
677      if (tcg_ops->record_sigsegv) {
678          tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
679      }
680  
681      force_sig_fault(TARGET_SIGSEGV,
682                      maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
683                      addr);
684      cpu->exception_index = EXCP_INTERRUPT;
685      cpu_loop_exit_restore(cpu, ra);
686  }
687  
688  void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
689                            MMUAccessType access_type, uintptr_t ra)
690  {
691      const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
692  
693      if (tcg_ops->record_sigbus) {
694          tcg_ops->record_sigbus(cpu, addr, access_type, ra);
695      }
696  
697      force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
698      cpu->exception_index = EXCP_INTERRUPT;
699      cpu_loop_exit_restore(cpu, ra);
700  }
701  
702  /* abort execution with signal */
703  static G_NORETURN
704  void die_with_signal(int host_sig)
705  {
706      struct sigaction act = {
707          .sa_handler = SIG_DFL,
708      };
709  
710      /*
711       * The proper exit code for dying from an uncaught signal is -<signal>.
712       * The kernel doesn't allow exit() or _exit() to pass a negative value.
713       * To get the proper exit code we need to actually die from an uncaught
714       * signal.  Here the default signal handler is installed, we send
715       * the signal and we wait for it to arrive.
716       */
717      sigfillset(&act.sa_mask);
718      sigaction(host_sig, &act, NULL);
719  
720      kill(getpid(), host_sig);
721  
722      /* Make sure the signal isn't masked (reusing the mask inside of act). */
723      sigdelset(&act.sa_mask, host_sig);
724      sigsuspend(&act.sa_mask);
725  
726      /* unreachable */
727      _exit(EXIT_FAILURE);
728  }
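
/*
 * Observation sketch (illustrative): after die_with_signal(SIGABRT), a
 * waiting parent sees WIFSIGNALED(status) with WTERMSIG(status) ==
 * SIGABRT, which a shell reports as exit status 128 + 6.
 */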
729  
730  static G_NORETURN
731  void dump_core_and_abort(CPUArchState *env, int target_sig)
732  {
733      CPUState *cpu = env_cpu(env);
734      TaskState *ts = get_task_state(cpu);
735      int host_sig, core_dumped = 0;
736  
737      /* On exit, undo the remapping of SIGABRT. */
738      if (target_sig == TARGET_SIGABRT) {
739          host_sig = SIGABRT;
740      } else {
741          host_sig = target_to_host_signal(target_sig);
742      }
743      trace_user_dump_core_and_abort(env, target_sig, host_sig);
744      gdb_signalled(env, target_sig);
745  
746      /* dump core if supported by target binary format */
747      if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
748          stop_all_tasks();
749          core_dumped =
750              ((*ts->bprm->core_dump)(target_sig, env) == 0);
751      }
752      if (core_dumped) {
753          /* We already dumped the core of the target process; we don't want
754           * a core dump of QEMU itself. */
755          struct rlimit nodump;
756          getrlimit(RLIMIT_CORE, &nodump);
757          nodump.rlim_cur = 0;
758          setrlimit(RLIMIT_CORE, &nodump);
759          (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
760              target_sig, strsignal(host_sig), "core dumped");
761      }
762  
763      preexit_cleanup(env, 128 + target_sig);
764      die_with_signal(host_sig);
765  }
766  
767  /* Queue a signal so that it will be sent to the virtual CPU as soon
768     as possible. */
769  void queue_signal(CPUArchState *env, int sig, int si_type,
770                    target_siginfo_t *info)
771  {
772      CPUState *cpu = env_cpu(env);
773      TaskState *ts = get_task_state(cpu);
774  
775      trace_user_queue_signal(env, sig);
776  
777      info->si_code = deposit32(info->si_code, 16, 16, si_type);
778  
779      ts->sync_signal.info = *info;
780      ts->sync_signal.pending = sig;
781      /* signal that a new signal is pending */
782      qatomic_set(&ts->signal_pending, 1);
783  }
784  
785  
786  /* Adjust the signal context to rewind out of safe-syscall if we're in it */
787  static inline void rewind_if_in_safe_syscall(void *puc)
788  {
789      host_sigcontext *uc = (host_sigcontext *)puc;
790      uintptr_t pcreg = host_signal_pc(uc);
791  
792      if (pcreg > (uintptr_t)safe_syscall_start
793          && pcreg < (uintptr_t)safe_syscall_end) {
794          host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
795      }
796  }
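
/*
 * Added note: the safe-syscall code re-checks signal_pending at
 * safe_syscall_start before issuing the host syscall, so rewinding the
 * PC into that window makes the interrupted syscall return
 * -QEMU_ERESTARTSYS instead of blocking with a signal pending.
 */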
797  
798  static G_NORETURN
799  void die_from_signal(siginfo_t *info)
800  {
801      char sigbuf[4], codebuf[12];
802      const char *sig, *code = NULL;
803  
804      switch (info->si_signo) {
805      case SIGSEGV:
806          sig = "SEGV";
807          switch (info->si_code) {
808          case SEGV_MAPERR:
809              code = "MAPERR";
810              break;
811          case SEGV_ACCERR:
812              code = "ACCERR";
813              break;
814          }
815          break;
816      case SIGBUS:
817          sig = "BUS";
818          switch (info->si_code) {
819          case BUS_ADRALN:
820              code = "ADRALN";
821              break;
822          case BUS_ADRERR:
823              code = "ADRERR";
824              break;
825          }
826          break;
827      case SIGILL:
828          sig = "ILL";
829          switch (info->si_code) {
830          case ILL_ILLOPC:
831              code = "ILLOPC";
832              break;
833          case ILL_ILLOPN:
834              code = "ILLOPN";
835              break;
836          case ILL_ILLADR:
837              code = "ILLADR";
838              break;
839          case ILL_PRVOPC:
840              code = "PRVOPC";
841              break;
842          case ILL_PRVREG:
843              code = "PRVREG";
844              break;
845          case ILL_COPROC:
846              code = "COPROC";
847              break;
848          }
849          break;
850      case SIGFPE:
851          sig = "FPE";
852          switch (info->si_code) {
853          case FPE_INTDIV:
854              code = "INTDIV";
855              break;
856          case FPE_INTOVF:
857              code = "INTOVF";
858              break;
859          }
860          break;
861      case SIGTRAP:
862          sig = "TRAP";
863          break;
864      default:
865          snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
866          sig = sigbuf;
867          break;
868      }
869      if (code == NULL) {
870          snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
871          code = codebuf;
872      }
873  
874      error_report("QEMU internal SIG%s {code=%s, addr=%p}",
875                   sig, code, info->si_addr);
876      die_with_signal(info->si_signo);
877  }
878  
879  static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
880                                   host_sigcontext *uc)
881  {
882      uintptr_t host_addr = (uintptr_t)info->si_addr;
883      /*
884       * Convert forcefully to guest address space: addresses outside
885       * reserved_va are still valid to report via SEGV_MAPERR.
886       */
887      bool is_valid = h2g_valid(host_addr);
888      abi_ptr guest_addr = h2g_nocheck(host_addr);
889      uintptr_t pc = host_signal_pc(uc);
890      bool is_write = host_signal_write(info, uc);
891      MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
892      bool maperr;
893  
894      /* If this was a write to a TB protected page, restart. */
895      if (is_write
896          && is_valid
897          && info->si_code == SEGV_ACCERR
898          && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
899                                         pc, guest_addr)) {
900          return;
901      }
902  
903      /*
904       * If the access was not on behalf of the guest (the PC is not within
905       * the executable mapping of the generated code buffer), it is a host bug.
906       */
907      if (access_type != MMU_INST_FETCH
908          && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
909          die_from_signal(info);
910      }
911  
912      maperr = true;
913      if (is_valid && info->si_code == SEGV_ACCERR) {
914          /*
915           * With reserved_va, the whole address space is PROT_NONE,
916           * which means that we may get ACCERR when we want MAPERR.
917           */
918          if (page_get_flags(guest_addr) & PAGE_VALID) {
919              maperr = false;
920          } else {
921              info->si_code = SEGV_MAPERR;
922          }
923      }
924  
925      sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
926      cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
927  }
928  
929  static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
930                                  host_sigcontext *uc)
931  {
932      uintptr_t pc = host_signal_pc(uc);
933      bool is_write = host_signal_write(info, uc);
934      MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
935  
936      /*
937       * If the access was not on behalf of the guest (the PC is not within
938       * the executable mapping of the generated code buffer), it is a host bug.
939       */
940      if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
941          die_from_signal(info);
942      }
943  
944      if (info->si_code == BUS_ADRALN) {
945          uintptr_t host_addr = (uintptr_t)info->si_addr;
946          abi_ptr guest_addr = h2g_nocheck(host_addr);
947  
948          sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
949          cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
950      }
951      return pc;
952  }
953  
954  static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
955  {
956      CPUState *cpu = thread_cpu;
957      CPUArchState *env = cpu_env(cpu);
958      TaskState *ts = get_task_state(cpu);
959      target_siginfo_t tinfo;
960      host_sigcontext *uc = puc;
961      struct emulated_sigtable *k;
962      int guest_sig;
963      uintptr_t pc = 0;
964      bool sync_sig = false;
965      void *sigmask;
966  
967      /*
968       * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
969       * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
970       * SIGFPE, SIGTRAP are always host bugs.
971       */
972      if (info->si_code > 0) {
973          switch (host_sig) {
974          case SIGSEGV:
975              /* Only returns on handle_sigsegv_accerr_write success. */
976              host_sigsegv_handler(cpu, info, uc);
977              return;
978          case SIGBUS:
979              pc = host_sigbus_handler(cpu, info, uc);
980              sync_sig = true;
981              break;
982          case SIGILL:
983          case SIGFPE:
984          case SIGTRAP:
985              die_from_signal(info);
986          }
987      }
988  
989      /* get target signal number */
990      guest_sig = host_to_target_signal(host_sig);
991      if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
992          return;
993      }
994      trace_user_host_signal(env, host_sig, guest_sig);
995  
996      host_to_target_siginfo_noswap(&tinfo, info);
997      k = &ts->sigtab[guest_sig - 1];
998      k->info = tinfo;
999      k->pending = guest_sig;
1000      ts->signal_pending = 1;
1001  
1002      /*
1003       * For synchronous signals, unwind the cpu state to the faulting
1004       * insn and then exit back to the main loop so that the signal
1005       * is delivered immediately.
1006       */
1007      if (sync_sig) {
1008          cpu->exception_index = EXCP_INTERRUPT;
1009          cpu_loop_exit_restore(cpu, pc);
1010      }
1011  
1012      rewind_if_in_safe_syscall(puc);
1013  
1014      /*
1015       * Block host signals until the target signal handler is entered. We
1016       * can't block SIGSEGV or SIGBUS while we're executing guest
1017       * code in case the guest code provokes one in the window between
1018       * now and it getting out to the main loop. Signals will be
1019       * unblocked again in process_pending_signals().
1020       *
1021       * WARNING: we cannot use sigfillset() here because the sigmask
1022       * field is a kernel sigset_t, which is much smaller than the
1023       * libc sigset_t which sigfillset() operates on. Using sigfillset()
1024       * would write 0xff bytes off the end of the structure and trash
1025       * the data that follows it.
1026       */
1027      sigmask = host_signal_mask(uc);
1028      memset(sigmask, 0xff, SIGSET_T_SIZE);
1029      sigdelset(sigmask, SIGSEGV);
1030      sigdelset(sigmask, SIGBUS);
1031  
1032      /* interrupt the virtual CPU as soon as possible */
1033      cpu_exit(thread_cpu);
1034  }
1035  
1036  /* do_sigaltstack() returns target values and errnos. */
1037  /* compare linux/kernel/signal.c:do_sigaltstack() */
1038  abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
1039                          CPUArchState *env)
1040  {
1041      target_stack_t oss, *uoss = NULL;
1042      abi_long ret = -TARGET_EFAULT;
1043  
1044      if (uoss_addr) {
1045          /* Verify writability now, but do not alter user memory yet. */
1046          if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
1047              goto out;
1048          }
1049          target_save_altstack(&oss, env);
1050      }
1051  
1052      if (uss_addr) {
1053          target_stack_t *uss;
1054  
1055          if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
1056              goto out;
1057          }
1058          ret = target_restore_altstack(uss, env);
1059          if (ret) {
1060              goto out;
1061          }
1062      }
1063  
1064      if (uoss_addr) {
1065          memcpy(uoss, &oss, sizeof(oss));
1066          unlock_user_struct(uoss, uoss_addr, 1);
1067          uoss = NULL;
1068      }
1069      ret = 0;
1070  
1071   out:
1072      if (uoss) {
1073          unlock_user_struct(uoss, uoss_addr, 0);
1074      }
1075      return ret;
1076  }
1077  
1078  /* do_sigaction() returns target values and host errnos. */
1079  int do_sigaction(int sig, const struct target_sigaction *act,
1080                   struct target_sigaction *oact, abi_ulong ka_restorer)
1081  {
1082      struct target_sigaction *k;
1083      int host_sig;
1084      int ret = 0;
1085  
1086      trace_signal_do_sigaction_guest(sig, TARGET_NSIG);
1087  
1088      if (sig < 1 || sig > TARGET_NSIG) {
1089          return -TARGET_EINVAL;
1090      }
1091  
1092      if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
1093          return -TARGET_EINVAL;
1094      }
1095  
1096      if (block_signals()) {
1097          return -QEMU_ERESTARTSYS;
1098      }
1099  
1100      k = &sigact_table[sig - 1];
1101      if (oact) {
1102          __put_user(k->_sa_handler, &oact->_sa_handler);
1103          __put_user(k->sa_flags, &oact->sa_flags);
1104  #ifdef TARGET_ARCH_HAS_SA_RESTORER
1105          __put_user(k->sa_restorer, &oact->sa_restorer);
1106  #endif
1107          /* Not swapped.  */
1108          oact->sa_mask = k->sa_mask;
1109      }
1110      if (act) {
1111          __get_user(k->_sa_handler, &act->_sa_handler);
1112          __get_user(k->sa_flags, &act->sa_flags);
1113  #ifdef TARGET_ARCH_HAS_SA_RESTORER
1114          __get_user(k->sa_restorer, &act->sa_restorer);
1115  #endif
1116  #ifdef TARGET_ARCH_HAS_KA_RESTORER
1117          k->ka_restorer = ka_restorer;
1118  #endif
1119          /* To be swapped in target_to_host_sigset.  */
1120          k->sa_mask = act->sa_mask;
1121  
1122          /* we update the host linux signal state */
1123          host_sig = target_to_host_signal(sig);
1124          trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
1125          if (host_sig > SIGRTMAX) {
1126              /* we don't have enough host signals to map all target signals */
1127              qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
1128                            sig);
1129              /*
1130               * We don't return an error here, because some programs try to
1131               * register a handler for every possible RT signal even if they
1132               * don't need it.
1133               * An error here would abort them, whereas it is harmless
1134               * for the signal simply to be unavailable later.
1135               * This is the case for Go,
1136               *   see https://github.com/golang/go/issues/33746
1137               * so we silently ignore the error.
1138               */
1139              return 0;
1140          }
1141          if (host_sig != SIGSEGV && host_sig != SIGBUS) {
1142              struct sigaction act1;
1143  
1144              sigfillset(&act1.sa_mask);
1145              act1.sa_flags = SA_SIGINFO;
1146              if (k->_sa_handler == TARGET_SIG_IGN) {
1147                  /*
1148                   * It is important to update the host kernel signal ignore
1149                   * state to avoid getting unexpected interrupted syscalls.
1150                   */
1151                  act1.sa_sigaction = (void *)SIG_IGN;
1152              } else if (k->_sa_handler == TARGET_SIG_DFL) {
1153                  if (core_dump_signal(sig)) {
1154                      act1.sa_sigaction = host_signal_handler;
1155                  } else {
1156                      act1.sa_sigaction = (void *)SIG_DFL;
1157                  }
1158              } else {
1159                  act1.sa_sigaction = host_signal_handler;
1160                  if (k->sa_flags & TARGET_SA_RESTART) {
1161                      act1.sa_flags |= SA_RESTART;
1162                  }
1163              }
1164              ret = sigaction(host_sig, &act1, NULL);
1165          }
1166      }
1167      return ret;
1168  }
1169  
1170  static void handle_pending_signal(CPUArchState *cpu_env, int sig,
1171                                    struct emulated_sigtable *k)
1172  {
1173      CPUState *cpu = env_cpu(cpu_env);
1174      abi_ulong handler;
1175      sigset_t set;
1176      target_siginfo_t unswapped;
1177      target_sigset_t target_old_set;
1178      struct target_sigaction *sa;
1179      TaskState *ts = get_task_state(cpu);
1180  
1181      trace_user_handle_signal(cpu_env, sig);
1182      /* dequeue signal */
1183      k->pending = 0;
1184  
1185      /*
1186       * Write the siginfo values out byteswapped according to the target.
1187       * This also strips the si_type out of si_code, making it correct for
1188       * the target.  We must hold on to the original unswapped copy for
1189       * strace below, because si_type is still required there.
1190       */
1191      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
1192          unswapped = k->info;
1193      }
1194      tswap_siginfo(&k->info, &k->info);
1195  
1196      sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
1197      if (!sig) {
1198          sa = NULL;
1199          handler = TARGET_SIG_IGN;
1200      } else {
1201          sa = &sigact_table[sig - 1];
1202          handler = sa->_sa_handler;
1203      }
1204  
1205      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
1206          print_taken_signal(sig, &unswapped);
1207      }
1208  
1209      if (handler == TARGET_SIG_DFL) {
1210          /* Default handler: ignore some signals; the others are job control or fatal. */
1211          if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
1212              kill(getpid(), SIGSTOP);
1213          } else if (sig != TARGET_SIGCHLD &&
1214                     sig != TARGET_SIGURG &&
1215                     sig != TARGET_SIGWINCH &&
1216                     sig != TARGET_SIGCONT) {
1217              dump_core_and_abort(cpu_env, sig);
1218          }
1219      } else if (handler == TARGET_SIG_IGN) {
1220          /* ignore sig */
1221      } else if (handler == TARGET_SIG_ERR) {
1222          dump_core_and_abort(cpu_env, sig);
1223      } else {
1224          /* compute the blocked signals during the handler execution */
1225          sigset_t *blocked_set;
1226  
1227          target_to_host_sigset(&set, &sa->sa_mask);
1228          /* SA_NODEFER indicates that the current signal should not be
1229             blocked during the handler */
1230          if (!(sa->sa_flags & TARGET_SA_NODEFER))
1231              sigaddset(&set, target_to_host_signal(sig));
1232  
1233          /* save the previous blocked signal state to restore it at the
1234             end of the signal execution (see do_sigreturn) */
1235          host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
1236  
1237          /* block signals in the handler */
1238          blocked_set = ts->in_sigsuspend ?
1239              &ts->sigsuspend_mask : &ts->signal_mask;
1240          sigorset(&ts->signal_mask, blocked_set, &set);
1241          ts->in_sigsuspend = 0;
1242  
1243          /* if the CPU is in VM86 mode, we restore the 32 bit values */
1244  #if defined(TARGET_I386) && !defined(TARGET_X86_64)
1245          {
1246              CPUX86State *env = cpu_env;
1247              if (env->eflags & VM_MASK)
1248                  save_v86_state(env);
1249          }
1250  #endif
1251          /* prepare the stack frame of the virtual CPU */
1252  #if defined(TARGET_ARCH_HAS_SETUP_FRAME)
1253          if (sa->sa_flags & TARGET_SA_SIGINFO) {
1254              setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
1255          } else {
1256              setup_frame(sig, sa, &target_old_set, cpu_env);
1257          }
1258  #else
1259          /* These targets do not have traditional signals.  */
1260          setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
1261  #endif
1262          if (sa->sa_flags & TARGET_SA_RESETHAND) {
1263              sa->_sa_handler = TARGET_SIG_DFL;
1264          }
1265      }
1266  }
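
/*
 * Timeline sketch (illustrative): for a caught signal the sequence is
 * host_signal_handler() -> process_pending_signals() ->
 * handle_pending_signal() -> setup_rt_frame(), after which the guest
 * runs its handler and re-enters QEMU via the sigreturn trampoline to
 * restore the mask saved in target_old_set above.
 */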
1267  
1268  void process_pending_signals(CPUArchState *cpu_env)
1269  {
1270      CPUState *cpu = env_cpu(cpu_env);
1271      int sig;
1272      TaskState *ts = get_task_state(cpu);
1273      sigset_t set;
1274      sigset_t *blocked_set;
1275  
1276      while (qatomic_read(&ts->signal_pending)) {
1277          sigfillset(&set);
1278          sigprocmask(SIG_SETMASK, &set, 0);
1279  
1280      restart_scan:
1281          sig = ts->sync_signal.pending;
1282          if (sig) {
1283          /* Synchronous signals are forced;
1284           * see force_sig_info() and its callers in Linux.
1285               * Note that not all of our queue_signal() calls in QEMU correspond
1286               * to force_sig_info() calls in Linux (some are send_sig_info()).
1287               * However it seems like a kernel bug to me to allow the process
1288               * to block a synchronous signal since it could then just end up
1289               * looping round and round indefinitely.
1290               */
1291              if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
1292                  || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
1293                  sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
1294                  sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
1295              }
1296  
1297              handle_pending_signal(cpu_env, sig, &ts->sync_signal);
1298          }
1299  
1300          for (sig = 1; sig <= TARGET_NSIG; sig++) {
1301              blocked_set = ts->in_sigsuspend ?
1302                  &ts->sigsuspend_mask : &ts->signal_mask;
1303  
1304              if (ts->sigtab[sig - 1].pending &&
1305                  (!sigismember(blocked_set,
1306                                target_to_host_signal_table[sig]))) {
1307                  handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
1308                  /* Restart scan from the beginning, as handle_pending_signal
1309                   * might have resulted in a new synchronous signal (eg SIGSEGV).
1310                   */
1311                  goto restart_scan;
1312              }
1313          }
1314  
1315          /* if no signal is pending, unblock signals and recheck (the act
1316           * of unblocking might cause us to take another host signal which
1317           * will set signal_pending again).
1318           */
1319          qatomic_set(&ts->signal_pending, 0);
1320          ts->in_sigsuspend = 0;
1321          set = ts->signal_mask;
1322          sigdelset(&set, SIGSEGV);
1323          sigdelset(&set, SIGBUS);
1324          sigprocmask(SIG_SETMASK, &set, 0);
1325      }
1326      ts->in_sigsuspend = 0;
1327  }
1328  
1329  int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
1330                              target_ulong sigsize)
1331  {
1332      TaskState *ts = get_task_state(thread_cpu);
1333      sigset_t *host_set = &ts->sigsuspend_mask;
1334      target_sigset_t *target_sigset;
1335  
1336      if (sigsize != sizeof(*target_sigset)) {
1337          /* Like the kernel, we enforce correct size sigsets */
1338          return -TARGET_EINVAL;
1339      }
1340  
1341      target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
1342      if (!target_sigset) {
1343          return -TARGET_EFAULT;
1344      }
1345      target_to_host_sigset(host_set, target_sigset);
1346      unlock_user(target_sigset, sigset, 0);
1347  
1348      *pset = host_set;
1349      return 0;
1350  }
1351