/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* the remaining signals map to themselves */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

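/* Guest sigset helpers. A target_sigset_t holds TARGET_NSIG_WORDS words
 * of TARGET_NSIG_BPW bits each; signal N lives at bit
 * (N - 1) % TARGET_NSIG_BPW of word (N - 1) / TARGET_NSIG_BPW. For
 * example, with TARGET_NSIG_BPW == 32, signal 1 is bit 0 of word 0 and
 * signal 33 is bit 0 of word 1.
 */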
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

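/* Conversions between host and guest sigsets. The _internal variants
 * leave each word in host byte order, for use inside QEMU; the plain
 * variants additionally byte-swap every word with tswapal() so the
 * result can be copied to or from guest memory.
 */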
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

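/* The "old" sigset helpers below handle the legacy single-word sigset
 * ABI (old-style sigprocmask/sigaction); only the first TARGET_NSIG_BPW
 * signals fit in the abi_ulong, and the remaining words are zeroed on
 * the way back in.
 */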
void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

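/* Block all host signals for this thread and atomically set
 * ts->signal_pending, returning its previous value. A non-zero return
 * means a signal was already pending and the caller should back out;
 * the usual pattern (a sketch; see do_sigprocmask() below for a real
 * caller) is:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 */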
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

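/* A sketch of the si_code packing used above: the si_type guess is
 * stored in bits 16..31, so e.g. a siginfo arriving with si_code
 * SI_USER and classified as QEMU_SI_KILL is recorded as
 * deposit32(SI_USER, 16, 16, QEMU_SI_KILL). tswap_siginfo() undoes
 * this with extract32()/sextract32() below.
 */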
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

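/* Returns 1 if the default action of the given target signal is to
 * terminate the process, 0 for signals that are ignored by default or
 * are job control signals. signal_init() uses this to decide which
 * host signals get our handler installed.
 */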
static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

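/* One-time setup for signal emulation: complete the host<->target
 * conversion tables (entries not listed above map to themselves),
 * seed the guest signal mask from the host mask, and install
 * host_signal_handler() for every default-fatal signal.
 */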
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* Set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of QEMU itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

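/* When HAVE_SAFE_SYSCALL is defined, the host-specific safe-syscall
 * support supplies a rewind_if_in_safe_syscall() that winds the PC in
 * the host ucontext back to the start of the safe_syscall code region
 * if the signal arrived before the host syscall was committed, so the
 * guest syscall can be restarted cleanly. (That is a summary of the
 * mechanism provided elsewhere; only the no-op fallback is defined
 * here.)
 */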
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler has been
     * entered. We can't block SIGSEGV or SIGBUS while we're executing
     * guest code in case the guest code provokes one in the window
     * between now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpectedly interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

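/* Deliver one pending signal to the guest: dequeue it, give gdb and
 * strace a chance to observe it, emulate the default action (stop,
 * ignore, or dump core) when the guest has no handler, and otherwise
 * compute the new blocked set and build the signal frame on the
 * virtual CPU.
 */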
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

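/* Main signal delivery loop, typically invoked from the per-arch
 * cpu_loop() once ts->signal_pending has been set. The synchronous
 * signal, if any, is handled first, then any deliverable asynchronous
 * ones; host signals are unblocked again before returning to guest
 * code.
 */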
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
953