xref: /openbmc/qemu/linux-user/signal.c (revision b4b9a0e3)
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX itself, and the first
 * signal is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

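/*
 * Worked example (editor's sketch, not part of the original file):
 * assuming a target where TARGET_NSIG_BPW is 64, target signal 65
 * (a realtime signal) lands in word 1, bit 0 after the signum--
 * adjustment:
 */
#if 0
target_sigset_t set;
target_sigemptyset(&set);
target_sigaddset(&set, 65);              /* set.sig[1] |= (abi_ulong)1 << 0 */
g_assert(target_sigismember(&set, 65));  /* reads back the same bit */
#endif
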
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

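/*
 * Usage sketch (editor's note, hypothetical caller): any code that is
 * about to mutate emulated signal state calls block_signals() first and
 * restarts the syscall if a signal was already pending, e.g.:
 */
#if 0
abi_long do_some_signal_syscall(void)
{
    if (block_signals()) {
        return -TARGET_ERESTARTSYS;  /* retried after signal delivery */
    }
    /* ... safe to touch ts->signal_mask, sigact_table, etc. here ... */
    return 0;
}
#endif
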
/* Wrapper for the sigprocmask function
 * Emulates sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

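/*
 * Usage sketch (editor's note, hypothetical syscall handler with
 * hypothetical locals): the guest's rt_sigprocmask is implemented by
 * converting the guest set to a host sigset_t and delegating to
 * do_sigprocmask():
 */
#if 0
sigset_t set, oldset;
target_sigset_t target_set, target_oldset;   /* hypothetical locals */
abi_long ret;

target_to_host_sigset(&set, &target_set);    /* guest layout -> host */
ret = do_sigprocmask(SIG_BLOCK, &set, &oldset);
if (ret == 0) {
    host_to_target_sigset(&target_oldset, &oldset);  /* host -> guest */
}
#endif
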
#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

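/*
 * Worked example (editor's note): with an alternate stack registered at
 * ss_sp = 0x1000 and ss_size = 0x2000, a handler installed with
 * TARGET_SA_ONSTACK, and a current sp of 0x7f000000 (not already on the
 * alternate stack), target_sigsp() returns 0x3000, the top of the
 * alternate stack, since frames are built downwards from there.  If sp
 * already lies inside [0x1000, 0x3000), sas_ss_flags() reports
 * SS_ONSTACK and the frame is built on the current stack instead.
 */
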
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

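/*
 * Behaviour sketch (editor's note): mirroring the kernel, a guest may
 * always disable its alternate stack, may not change it while running
 * on it (EPERM above), and may not install one smaller than the
 * minimum (ENOMEM above).  From the guest's point of view:
 */
#if 0
stack_t ss = { .ss_sp = buf, .ss_size = SIGSTKSZ };  /* buf: hypothetical */
sigaltstack(&ss, NULL);                 /* ok */
ss.ss_size = 1;
sigaltstack(&ss, NULL);                 /* fails with ENOMEM */
ss.ss_flags = SS_DISABLE;
sigaltstack(&ss, NULL);                 /* ok: size is ignored */
#endif
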
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

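/*
 * Round-trip sketch (editor's note): the QEMU_SI_* marker is stashed in
 * bits [16,31] of si_code with deposit32() above, and recovered (and
 * stripped again) in tswap_siginfo() below with extract32()/sextract32():
 */
#if 0
int coded = deposit32(SI_USER, 16, 16, QEMU_SI_KILL); /* pack marker */
g_assert(extract32(coded, 16, 16) == QEMU_SI_KILL);   /* which union arm */
g_assert(sextract32(coded, 0, 16) == SI_USER);        /* guest-visible code */
#endif
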
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc keeps the
     * lowest rt signals for its own use, which is why SIGRTMIN (34) is
     * generally greater than __SIGRTMIN (32); it uses only the lower
     * two, and probably nobody uses the upper ones.  To fix this
     * properly we would need to do manual signal delivery multiplexed
     * over a single host signal.  Attempts to configure such "missing"
     * signals via sigaction will be silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}

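/*
 * Worked example (editor's note): on a glibc host where SIGRTMIN is 34
 * and SIGRTMAX is 64, with a target where TARGET_SIGRTMIN is 32 and
 * TARGET_NSIG is 64, the first loop above maps host 34..64 to target
 * 32..62, so target rt signals 63 and 64 stay poisoned (_NSIG) in
 * target_to_host_signal_table[] and cannot be delivered; configuring
 * them via sigaction is silently ignored, as the comment in
 * signal_table_init() says.
 */
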
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

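/*
 * Usage sketch (editor's note, hypothetical caller): per-target code
 * that detects, say, an unaligned access raises the guest fault like
 * this; the si_code constant and env->pc field are illustrative and
 * vary by target:
 */
#if 0
force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, env->pc);
#endif
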
/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; we don't
         * want a coredump of QEMU itself. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered.
     * We can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

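/*
 * Size sketch (editor's note): on Linux the kernel sigset is _NSIG = 64
 * bits, i.e. 8 bytes, while glibc's userspace sigset_t is 128 bytes.
 * SIGSET_T_SIZE (assumed to be defined elsewhere in linux-user as the
 * kernel sigset size) is what keeps the memset() above inside
 * uc_sigmask; the assumed relationship is:
 */
#if 0
QEMU_BUILD_BUG_ON(SIGSET_T_SIZE != _NSIG / 8); /* 8 bytes on Linux hosts */
#endif
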
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.  An error here could abort them, whereas
             * simply not having the signal available later causes no
             * problem.  This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpectedly interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

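/*
 * Host-state sketch (editor's note): summarising the branches above,
 * this is what do_sigaction() installs on the host for each guest
 * disposition (SIGSEGV/SIGBUS always keep host_signal_handler):
 *
 *   guest handler        host action installed
 *   TARGET_SIG_IGN       SIG_IGN
 *   TARGET_SIG_DFL       host_signal_handler if fatal_signal(sig),
 *                        else SIG_DFL
 *   anything else        host_signal_handler (+ SA_RESTART if requested)
 */
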
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced; see force_sig_info() and
             * callers in Linux.  Note that not all of our queue_signal()
             * calls in QEMU correspond to force_sig_info() calls in Linux
             * (some are send_sig_info()).  However it seems like a kernel
             * bug to me to allow the process to block a synchronous signal,
             * since it could then just end up looping round and round
             * indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}