/* linux-user/signal.c (QEMU, revision 6016b7b4) */
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"

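/* Guest signal dispositions, indexed by target signal number minus 1. */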
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

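/*
 * Inverse mapping, filled in by signal_table_init(); entries that remain
 * at the _NSIG poison value have no usable host signal.
 */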
static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

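/* Signal numbers are 1-based, so bit 0 of sig[0] represents signal 1. */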
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

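/*
 * Block all host signals for this thread; returns non-zero if a guest
 * signal was already pending, in which case the caller should return
 * -QEMU_ERESTARTSYS rather than proceeding.
 */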
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for the sigprocmask function
 * Emulates sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

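/*
 * The unsigned subtraction below makes this, in effect, a
 * single-comparison range check: true iff ss_sp <= sp < ss_sp + ss_size.
 */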
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloc'd data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if the given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.
     * glibc at least uses only the lower 2 rt signals and probably
     * nobody's using the upper ones.
     * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
     * To fix this properly we need to do manual signal delivery multiplexed
     * over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because that affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; we don't
         * want a coredump of QEMU itself. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here, the default signal
     * handler is installed; we send ourselves a signal and wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

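    /* Record si_type in the top 16 bits of si_code, as
     * host_to_target_siginfo_noswap() does; tswap_siginfo() strips it
     * back out before si_code reaches the guest. */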
    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}


/* Adjust the signal context to rewind out of safe-syscall if we're in it */
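/*
 * If the PC is inside the safe_syscall critical section, reset it to
 * safe_syscall_start; the code there will then notice the pending signal
 * and return -QEMU_ERESTARTSYS instead of entering the host syscall.
 */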
static inline void rewind_if_in_safe_syscall(void *puc)
{
    ucontext_t *uc = (ucontext_t *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
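    /* si_code > 0 means the kernel generated the signal; si_code <= 0
     * (e.g. SI_USER, SI_QUEUE) means it was sent from userspace and is
     * handled as an ordinary asynchronous signal below. */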
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

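    /*
     * The signal is asynchronous from the guest's point of view: if it
     * interrupted a safe_syscall, rewind so that the syscall reports
     * -QEMU_ERESTARTSYS instead of blocking in the host kernel.
     */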
    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data beyond it.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

925 
926 /* do_sigaltstack() returns target values and errnos. */
927 /* compare linux/kernel/signal.c:do_sigaltstack() */
928 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
929                         CPUArchState *env)
930 {
931     target_stack_t oss, *uoss = NULL;
932     abi_long ret = -TARGET_EFAULT;
933 
934     if (uoss_addr) {
935         /* Verify writability now, but do not alter user memory yet. */
936         if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
937             goto out;
938         }
939         target_save_altstack(&oss, env);
940     }
941 
942     if (uss_addr) {
943         target_stack_t *uss;
944 
945         if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
946             goto out;
947         }
948         ret = target_restore_altstack(uss, env);
949         if (ret) {
950             goto out;
951         }
952     }
953 
954     if (uoss_addr) {
955         memcpy(uoss, &oss, sizeof(oss));
956         unlock_user_struct(uoss, uoss_addr, 1);
957         uoss = NULL;
958     }
959     ret = 0;
960 
961  out:
962     if (uoss) {
963         unlock_user_struct(uoss, uoss_addr, 0);
964     }
965     return ret;
966 }
967 
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here could abort them, whereas there is no harm in
             * the signal simply not being available later.
             * This is the case for golang,
             *   see https://github.com/golang/go/issues/33746,
             * so we silently ignore the error.
             */
            return 0;
        }
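        /*
         * Never replace the host dispositions of SIGSEGV and SIGBUS:
         * QEMU's own host_signal_handler must stay installed for them,
         * since they are used to detect guest memory faults.
         */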
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: some signals are ignored; the others are
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced;
             * see force_sig_info() and callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart the scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}