/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
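
/*
 * Note: entries not listed above are filled in as the identity mapping
 * by signal_init(), which also builds target_to_host_signal_table[]
 * below as the inverse of this table, so (assuming the mapping is a
 * bijection) target_to_host_signal() undoes host_to_target_signal().
 */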
static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
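
/*
 * Worked example: for signum == 1 (TARGET_SIGHUP) the decrement above
 * selects bit 0 of word 0, i.e. set->sig[0] & 1; signal TARGET_NSIG_BPW + 1
 * lands on bit 0 of word 1. Signal numbers are 1-based while the bitmask
 * is 0-based, exactly as in the kernel's sigset layout.
 */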

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}
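
/*
 * The two "old" sigset helpers above deliberately use a single abi_ulong,
 * so only the first TARGET_NSIG_BPW signals are representable; this
 * matches the legacy (non-rt) sigprocmask/sigaction ABI they serve.
 */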

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
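
/*
 * Usage sketch: callers check the return value to find out whether a
 * signal snuck in before everything was blocked, e.g.
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *
 * as do_sigprocmask() below does.
 */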

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
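
/*
 * Semantics sketch: SIG_BLOCK ORs *set into the guest mask, SIG_UNBLOCK
 * clears every member of *set from it, and SIG_SETMASK replaces the mask
 * outright; in every case SIGKILL and SIGSTOP are forced back out,
 * matching what the kernel does.
 */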

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}
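
/*
 * Note the single unsigned comparison above: when sp is below ss_sp the
 * subtraction wraps to a huge value and the test fails, so it checks
 * ss_sp <= sp < ss_sp + ss_size without ever computing the sum.
 */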

int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    return sp;
}
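
/*
 * Example: a handler registered with SA_ONSTACK, taken while off the
 * alternate stack, starts with sp at ss_sp + ss_size, i.e. the top of
 * the alternate stack (this generic helper assumes a downward-growing
 * target stack). If we are already on the alternate stack, sas_ss_flags()
 * returns SS_ONSTACK and sp is left untouched.
 */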

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
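
/*
 * Packing example: deposit32(si_code, 16, 16, si_type) keeps the kernel's
 * si_code in bits 0..15 and stores our si_type guess in bits 16..31, so
 * e.g. SI_USER (0) tagged as QEMU_SI_KILL becomes QEMU_SI_KILL << 16.
 * tswap_siginfo() below splits the two halves apart again.
 */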

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}
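
/*
 * After signal_init() runs, both conversion tables are fully populated,
 * and every default-fatal target signal has its host counterpart routed
 * to host_signal_handler(); default-ignored and job-control signals keep
 * the host disposition until the guest installs a handler via
 * do_sigaction().
 */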

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
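
/*
 * Dying this way means our parent's wait status satisfies WIFSIGNALED()
 * with WTERMSIG() == host_sig, so e.g. a guest killed by an unhandled
 * SIGSEGV typically makes the invoking shell report status 139
 * (128 + SIGSEGV), just as for a native process.
 */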

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
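
/*
 * Illustrative call, as in force_sig() above:
 *
 *     queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
 *
 * There is a single sync_signal slot per thread, so this is only
 * suitable for synchronous signals raised by the CPU emulation itself;
 * asynchronous host signals go through host_signal_handler() below.
 */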

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}
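
/*
 * Example: a guest passing ss_flags == TARGET_SS_DISABLE gets ss_sp and
 * ss_size forced to zero above, after which sas_ss_flags() reports
 * SS_DISABLE again and target_sigsp() stops switching onto the
 * alternate stack.
 */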

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpectedly interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
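
/*
 * Note: the host dispositions of SIGSEGV and SIGBUS are deliberately
 * left alone above; those must stay routed to host_signal_handler() so
 * the CPU emulator can turn host faults into guest exceptions, whatever
 * handler the guest registers.
 */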

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: some signals are ignored, job-control signals
           stop the process, and the rest are fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
989