/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

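/*
 * Illustrative sketch (not from the original source): because the table
 * above maps __SIGRTMIN to __SIGRTMAX and vice versa, and signal_init()
 * fills every unlisted entry with the identity mapping, converting a
 * signal number to the guest numbering and back should always round-trip.
 * A hypothetical self-check could read:
 *
 *     static void check_signal_tables(void)
 *     {
 *         int sig;
 *
 *         for (sig = 1; sig < _NSIG; sig++) {
 *             assert(target_to_host_signal(host_to_target_signal(sig)) == sig);
 *         }
 *     }
 */
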
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

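/*
 * Worked example (for illustration only): a target sigset is an array of
 * abi_ulong words holding TARGET_NSIG_BPW bits each. With a 32-bit guest
 * (TARGET_NSIG_BPW == 32), target_sigaddset(set, 33) computes:
 *
 *     signum = 33 - 1 = 32
 *     mask   = (abi_ulong)1 << (32 % 32) = 1
 *     word   = 32 / 32 = 1
 *
 * i.e. it sets bit 0 of set->sig[1], matching the kernel's layout where
 * signal 1 occupies bit 0 of word 0.
 */
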
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

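/*
 * Usage sketch (illustrative, not from the original source): the
 * _internal variants above work on host-endian words, while the public
 * variants also byte-swap each word with tswapal() so the data can be
 * exchanged with guest memory. A caller that has fetched a guest sigset
 * into a target_sigset_t would convert it like this:
 *
 *     target_sigset_t target_set;   // as read from guest memory
 *     sigset_t host_set;
 *
 *     target_to_host_sigset(&host_set, &target_set);
 *
 * and use host_to_target_sigset() for the reverse direction before
 * copying a set back out to the guest.
 */
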
void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

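/*
 * Typical call pattern (sketch): code that is about to modify emulated
 * signal state calls block_signals() first and restarts the syscall if a
 * signal was already pending, exactly as do_sigprocmask() and
 * do_sigaction() below do:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     // ...safe to update sigact_table / ts->signal_mask here...
 */
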
/* Wrapper for sigprocmask function
 * Emulates sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

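/*
 * Illustrative call sequence (a sketch of how syscall emulation is
 * expected to drive this wrapper; "target_set" stands for the set read
 * from guest memory): a guest rt_sigprocmask(SIG_BLOCK, ...) first has
 * its target sigset converted, then goes through do_sigprocmask(), and
 * the old mask is converted back for the guest:
 *
 *     sigset_t set, oldset;
 *     target_sigset_t target_old;
 *
 *     target_to_host_sigset(&set, &target_set);
 *     ret = do_sigprocmask(SIG_BLOCK, &set, &oldset);
 *     if (ret == 0) {
 *         host_to_target_sigset(&target_old, &oldset);
 *     }
 */
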
#if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    return sp;
}

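/*
 * Worked example (illustrative): on_sig_stack() exploits unsigned
 * wraparound so a single comparison checks both bounds. With
 * ss_sp = 0x1000 and ss_size = 0x2000:
 *
 *     sp = 0x1800:  0x1800 - 0x1000 = 0x0800 < 0x2000    -> on the stack
 *     sp = 0x0800:  0x0800 - 0x1000 wraps to a huge value -> off the stack
 *
 * target_sigsp() returns ss_sp + ss_size because the frame setup code
 * then allocates downwards from the top of the alternate stack.
 */
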
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

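/*
 * Worked example (illustrative): deposit32(si_code, 16, 16, si_type)
 * overwrites bits [16, 32) of si_code with our si_type guess, and
 * tswap_siginfo() below recovers both halves:
 *
 *     int packed = deposit32(si_code, 16, 16, si_type);
 *     int type   = extract32(packed, 16, 16);    si_type comes back out
 *     int code   = sextract32(packed, 0, 16);    low half, sign-extended
 */
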
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* Returns 1 if the given signal should dump core if not handled. */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

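/*
 * Sketch of the resulting tables (illustrative): after the two loops at
 * the top of signal_init(), host_to_target_signal_table is the identity
 * for every signal not listed explicitly, and target_to_host_signal_table
 * is its inverse. In particular the RT-signal swap above gives:
 *
 *     host_to_target_signal_table[__SIGRTMIN] == __SIGRTMAX
 *     target_to_host_signal_table[__SIGRTMAX] == __SIGRTMIN
 */
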
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

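/*
 * Usage sketch (hypothetical example, compare force_sig() above): target
 * code that detects a synchronous fault fills in a target_siginfo_t and
 * queues it for the current CPU. Raising SIGILL for an illegal opcode
 * might look like this, where "fault_addr" stands for whatever
 * target-specific address is available:
 *
 *     target_siginfo_t info;
 *
 *     info.si_signo = TARGET_SIGILL;
 *     info.si_errno = 0;
 *     info.si_code = TARGET_ILL_ILLOPC;
 *     info._sifields._sigfault._addr = fault_addr;
 *     queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
 */
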
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

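/*
 * Call shape (illustrative): the sigaltstack syscall emulation is
 * expected to pass the guest pointers straight through along with the
 * current guest stack pointer, roughly:
 *
 *     ret = do_sigaltstack(arg1, arg2,
 *                          get_sp_from_cpustate((CPUArchState *)cpu_env));
 *
 * so all validation and the EPERM check against the active stack happen
 * here rather than in the caller.
 */
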
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
985