/* xref: /openbmc/qemu/linux-user/signal.c (revision 851ed57d7a24ddf234a90b5bb196a143c84c10bc) */
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/cutils.h"
#include "gdbstub/user.h"
#include "exec/page-protection.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"
#include "tcg/tcg.h"

/* target_siginfo_t must fit in gdbstub's siginfo save area. */
QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
        MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}
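
/*
 * Illustrative sketch (editorial, not part of the original file): callers
 * treat any out-of-range result as "no valid mapping". A hypothetical
 * helper showing the intended round trip:
 *
 *     static bool hypothetical_has_guest_mapping(int host_sig)
 *     {
 *         int tsig = host_to_target_signal(host_sig);
 *         return tsig >= 1 && tsig <= TARGET_NSIG;
 *     }
 *
 * Unmapped host signals come back as TARGET_NSIG + 1, so the range
 * check rejects them.
 */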

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = get_task_state(thread_cpu);

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
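
/*
 * Illustrative sketch (editorial, not part of the original file): syscall
 * emulation code that changes the mask must not swallow -QEMU_ERESTARTSYS;
 * the main loop uses it to restart the guest syscall once the pending
 * signal has been delivered. A hypothetical call site:
 *
 *     abi_long ret = do_sigprocmask(SIG_SETMASK, &host_set, &old_set);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         return ret;   // restarted after signal delivery
 *     }
 *     host_to_target_old_sigset(&target_old, &old_set);
 */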

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = get_task_state(thread_cpu);

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = get_task_state(thread_cpu);

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}
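
/*
 * Illustrative sketch (editorial, not part of the original file): these
 * checks mirror what the guest observes through sigaltstack(2). Guest code
 * registering an alternate stack:
 *
 *     stack_t ss = { .ss_sp = buf, .ss_size = sizeof(buf), .ss_flags = 0 };
 *     sigaltstack(&ss, NULL);
 *
 * fails with ENOMEM when sizeof(buf) is below the minimum enforced above,
 * with EPERM when called while already running on the alternate stack,
 * and with EINVAL for unknown ss_flags values.
 */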

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                        | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
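
/*
 * Illustrative sketch (editorial, not part of the original file): the
 * si_type smuggling above is an ordinary bitfield round trip with the
 * helpers from qemu/bitops.h:
 *
 *     int packed = deposit32(si_code, 16, 16, QEMU_SI_FAULT);
 *     assert(extract32(packed, 16, 16) == QEMU_SI_FAULT);
 *     assert(sextract32(packed, 0, 16) == si_code);
 *
 * The sign extension in sextract32() matters because several kernel
 * si_code values (SI_QUEUE, SI_TIMER, SI_TKILL, ...) are negative.
 */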

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(const char *rtsig_map)
{
    int hsig, tsig, count;

    if (rtsig_map) {
        /*
         * Map host RT signals to target RT signals according to the
         * user-provided specification.
         */
        const char *s = rtsig_map;

        while (true) {
            int i;

            if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) {
                fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }

            for (i = 0; i < count; i++, tsig++, hsig++) {
                if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) {
                    fprintf(stderr, "%d is not a target rt signal\n", tsig);
                    exit(EXIT_FAILURE);
                }
                if (hsig < SIGRTMIN || hsig > SIGRTMAX) {
                    fprintf(stderr, "%d is not a host rt signal\n", hsig);
                    exit(EXIT_FAILURE);
                }
                if (host_to_target_signal_table[hsig]) {
                    fprintf(stderr, "%d already maps %d\n",
                            hsig, host_to_target_signal_table[hsig]);
                    exit(EXIT_FAILURE);
                }
                host_to_target_signal_table[hsig] = tsig;
            }

            if (*s) {
                s++;
            } else {
                break;
            }
        }
    } else {
        /*
         * Default host-to-target RT signal mapping.
         *
         * Signals are supported starting from TARGET_SIGRTMIN and going up
         * until we run out of host realtime signals.  Glibc uses the lower 2
         * RT signals and (hopefully) nobody uses the upper ones.
         * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
         * To fix this properly we would need to do manual signal delivery
         * multiplexed over a single host signal.
         * Attempts to configure "missing" signals via sigaction will be
         * silently ignored.
         *
         * Reserve one signal for internal usage (see below).
         */

        hsig = SIGRTMIN + 1;
        for (tsig = TARGET_SIGRTMIN;
             hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
             hsig++, tsig++) {
            host_to_target_signal_table[hsig] = tsig;
        }
    }

    /*
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort.  When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn.  If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    host_to_target_signal_table[SIGABRT] = 0;
    for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) {
        if (!host_to_target_signal_table[hsig]) {
            host_to_target_signal_table[hsig] = TARGET_SIGABRT;
            break;
        }
    }
    if (hsig > SIGRTMAX) {
        fprintf(stderr, "No rt signals left for SIGABRT mapping\n");
        exit(EXIT_FAILURE);
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            if (target_to_host_signal_table[tsig]) {
                fprintf(stderr, "%d is already mapped to %d\n",
                        tsig, target_to_host_signal_table[tsig]);
                exit(EXIT_FAILURE);
            }
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}
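
/*
 * Illustrative example (editorial, not part of the original file): the
 * QEMU_RTSIG_MAP parser above accepts comma-separated "tsig hsig count"
 * triples. For instance (values are hypothetical and host-dependent):
 *
 *     QEMU_RTSIG_MAP="34 40 5" qemu-<target> ./a.out
 *
 * maps target RT signals 34..38 onto host signals 40..44, leaving the
 * other host RT signals free for the host libc and for the SIGABRT
 * remapping done above.
 */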

void signal_init(const char *rtsig_map)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init(rtsig_map);

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default.  For any target signals that have no host
     * mapping, set to ignore.  For all core_dump_signal, install our
     * host signal handler so that we may invoke dump_core_and_abort.
     * This includes SIGSEGV and SIGBUS, which also need our signal
     * handler for paging and exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal.  Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}
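
/*
 * Illustrative sketch (editorial, not part of the original file): the effect
 * of die_with_signal() is visible to a parent process in the wait status
 * rather than in an exit code:
 *
 *     int status;
 *     waitpid(child_pid, &status, 0);
 *     if (WIFSIGNALED(status)) {
 *         int sig = WTERMSIG(status);  // the host_sig we died from
 *     }
 */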

static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}


/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

static G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}

static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
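
/*
 * Illustrative sketch (editorial, not part of the original file): the
 * WARNING above is a size mismatch. The sigset_t embedded in the kernel
 * ucontext is _NSIG/8 bytes (8 bytes on Linux), while the glibc sigset_t
 * is much larger (typically 128 bytes), so:
 *
 *     memset(sigmask, 0xff, SIGSET_T_SIZE);   // bounded by kernel size: OK
 *     sigfillset((sigset_t *)sigmask);        // WRONG: writes libc-sized set
 */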

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here would abort them, whereas there is no problem
             * in not having the signal available later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_siginfo_t unswapped;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = get_task_state(cpu);

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /*
     * Writes out siginfo values byteswapped according to the target.
     * It also strips the si_type from si_code, making it correct for
     * the target.  We must hold on to the original unswapped copy for
     * strace below, because si_type is still required there.
     */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        unswapped = k->info;
    }
    tswap_siginfo(&k->info, &k->info);

    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &unswapped);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are job
           control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = get_task_state(cpu);
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}