xref: /openbmc/qemu/linux-user/signal.c (revision 9f172adb35123a093aec8feb74de0e126ae2138e)
1 /*
2  *  Emulation of Linux signals
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23 
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28 #include "signal-common.h"
29 
/* Current guest alternate-signal-stack state; starts out disabled
 * until the guest installs one via sigaltstack().
 */
struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Per-guest-signal emulated sigaction state, indexed by (signal - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
40 
/* Host -> target signal number translation. Entries left zero here are
 * filled in as the identity mapping by signal_init().
 */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; built by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
85 
86 int host_to_target_signal(int sig)
87 {
88     if (sig < 0 || sig >= _NSIG)
89         return sig;
90     return host_to_target_signal_table[sig];
91 }
92 
93 int target_to_host_signal(int sig)
94 {
95     if (sig < 0 || sig >= _NSIG)
96         return sig;
97     return target_to_host_signal_table[sig];
98 }
99 
100 static inline void target_sigaddset(target_sigset_t *set, int signum)
101 {
102     signum--;
103     abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104     set->sig[signum / TARGET_NSIG_BPW] |= mask;
105 }
106 
107 static inline int target_sigismember(const target_sigset_t *set, int signum)
108 {
109     signum--;
110     abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111     return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112 }
113 
114 void host_to_target_sigset_internal(target_sigset_t *d,
115                                     const sigset_t *s)
116 {
117     int i;
118     target_sigemptyset(d);
119     for (i = 1; i <= TARGET_NSIG; i++) {
120         if (sigismember(s, i)) {
121             target_sigaddset(d, host_to_target_signal(i));
122         }
123     }
124 }
125 
126 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
127 {
128     target_sigset_t d1;
129     int i;
130 
131     host_to_target_sigset_internal(&d1, s);
132     for(i = 0;i < TARGET_NSIG_WORDS; i++)
133         d->sig[i] = tswapal(d1.sig[i]);
134 }
135 
136 void target_to_host_sigset_internal(sigset_t *d,
137                                     const target_sigset_t *s)
138 {
139     int i;
140     sigemptyset(d);
141     for (i = 1; i <= TARGET_NSIG; i++) {
142         if (target_sigismember(s, i)) {
143             sigaddset(d, target_to_host_signal(i));
144         }
145     }
146 }
147 
148 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
149 {
150     target_sigset_t s1;
151     int i;
152 
153     for(i = 0;i < TARGET_NSIG_WORDS; i++)
154         s1.sig[i] = tswapal(s->sig[i]);
155     target_to_host_sigset_internal(d, &s1);
156 }
157 
158 void host_to_target_old_sigset(abi_ulong *old_sigset,
159                                const sigset_t *sigset)
160 {
161     target_sigset_t d;
162     host_to_target_sigset(&d, sigset);
163     *old_sigset = d.sig[0];
164 }
165 
166 void target_to_host_old_sigset(sigset_t *sigset,
167                                const abi_ulong *old_sigset)
168 {
169     target_sigset_t d;
170     int i;
171 
172     d.sig[0] = *old_sigset;
173     for(i = 1;i < TARGET_NSIG_WORDS; i++)
174         d.sig[i] = 0;
175     target_to_host_sigset(sigset, &d);
176 }
177 
/* Block all host signals for the calling thread and mark a guest signal
 * as pending.  Returns the previous value of signal_pending: non-zero
 * means a signal was already pending, so the caller's syscall must be
 * restarted rather than executed.
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
192 
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Report the mask as it was before any modification below. */
    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* block_signals() also reports whether a signal was already
         * pending; if so the caller must restart the syscall, so we
         * must not touch the mask yet.
         */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            /* There is no set-difference helper, so clear each member
             * of 'set' from the mask individually.
             */
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
239 
240 #if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
241 /* Just set the guest's signal mask to the specified value; the
242  * caller is assumed to have called block_signals() already.
243  */
244 void set_sigmask(const sigset_t *set)
245 {
246     TaskState *ts = (TaskState *)thread_cpu->opaque;
247 
248     ts->signal_mask = *set;
249 }
250 #endif
251 
/* siginfo conversion */

/* Convert a host siginfo_t to guest layout WITHOUT byte swapping;
 * tswap_siginfo() must be applied afterwards before handing the result
 * to the guest.  Records our guess of which union member is valid in
 * the top 16 bits of si_code (one of the QEMU_SI_* values).
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    /* Stash the union-member guess in the high half of si_code. */
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

/* Byte-swap a guest siginfo produced by host_to_target_siginfo_noswap()
 * into final guest byte order.  Uses the QEMU_SI_* marker stored in the
 * top 16 bits of si_code to know which union fields are valid, and
 * strips that marker (sign-extending the low 16 bits) on the way out.
 */
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
388 
389 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
390 {
391     target_siginfo_t tgt_tmp;
392     host_to_target_siginfo_noswap(&tgt_tmp, info);
393     tswap_siginfo(tinfo, &tgt_tmp);
394 }
395 
396 /* XXX: we support only POSIX RT signals are used. */
397 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
398 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
399 {
400     /* This conversion is used only for the rt_sigqueueinfo syscall,
401      * and so we know that the _rt fields are the valid ones.
402      */
403     abi_ulong sival_ptr;
404 
405     __get_user(info->si_signo, &tinfo->si_signo);
406     __get_user(info->si_errno, &tinfo->si_errno);
407     __get_user(info->si_code, &tinfo->si_code);
408     __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
409     __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
410     __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
411     info->si_value.sival_ptr = (void *)(long)sival_ptr;
412 }
413 
414 static int fatal_signal (int sig)
415 {
416     switch (sig) {
417     case TARGET_SIGCHLD:
418     case TARGET_SIGURG:
419     case TARGET_SIGWINCH:
420         /* Ignored by default.  */
421         return 0;
422     case TARGET_SIGCONT:
423     case TARGET_SIGSTOP:
424     case TARGET_SIGTSTP:
425     case TARGET_SIGTTIN:
426     case TARGET_SIGTTOU:
427         /* Job control signals.  */
428         return 0;
429     default:
430         return 1;
431     }
432 }
433 
434 /* returns 1 if given signal should dump core if not handled */
435 static int core_dump_signal(int sig)
436 {
437     switch (sig) {
438     case TARGET_SIGABRT:
439     case TARGET_SIGFPE:
440     case TARGET_SIGILL:
441     case TARGET_SIGQUIT:
442     case TARGET_SIGSEGV:
443     case TARGET_SIGTRAP:
444     case TARGET_SIGBUS:
445         return (1);
446     default:
447         return (0);
448     }
449 }

/* One-time signal subsystem setup: build the host<->target translation
 * tables, snapshot the inherited host signal mask, and install our
 * host_signal_handler for every default-fatal guest signal.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record the pre-existing host disposition (IGN/DFL) so the
         * guest's initial sigaction state matches what it inherited. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
497 
498 /* Force a synchronously taken signal. The kernel force_sig() function
499  * also forces the signal to "not blocked, not ignored", but for QEMU
500  * that work is done in process_pending_signals().
501  */
502 void force_sig(int sig)
503 {
504     CPUState *cpu = thread_cpu;
505     CPUArchState *env = cpu->env_ptr;
506     target_siginfo_t info;
507 
508     info.si_signo = sig;
509     info.si_errno = 0;
510     info.si_code = TARGET_SI_KERNEL;
511     info._sifields._kill._pid = 0;
512     info._sifields._kill._uid = 0;
513     queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
514 }
515 
516 /* Force a SIGSEGV if we couldn't write to memory trying to set
517  * up the signal frame. oldsig is the signal we were trying to handle
518  * at the point of failure.
519  */
520 #if !defined(TARGET_RISCV)
521 void force_sigsegv(int oldsig)
522 {
523     if (oldsig == SIGSEGV) {
524         /* Make sure we don't try to deliver the signal again; this will
525          * end up with handle_pending_signal() calling dump_core_and_abort().
526          */
527         sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
528     }
529     force_sig(TARGET_SIGSEGV);
530 }
531 
532 #endif

/* abort execution with signal */
/* Terminate the whole emulation because the guest died from an
 * unhandled signal: optionally write a guest-format core dump, then
 * re-raise the host signal with the default handler installed so that
 * QEMU's own exit status is "killed by <signal>".  Never returns.
 */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
    of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
/* 'si_type' is one of the QEMU_SI_* markers; it is stored in the high
 * 16 bits of info->si_code for tswap_siginfo() to consume later.
 * Always returns 1 (the signal was queued).
 */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    /* Fill in the payload before publishing the pending flag. */
    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
606 
607 #ifndef HAVE_SAFE_SYSCALL
/* Fallback used when the target lacks safe-syscall support; the real
 * implementation rewinds the PC back to the start of a safe syscall
 * that a signal interrupted.
 */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
612 #endif

/* Host SA_SIGINFO handler installed by signal_init() for all
 * default-fatal signals.  Runs in async-signal context: it either
 * forwards CPU exceptions to the emulator, or records the signal as
 * pending for later synchronous delivery to the guest.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    /* Record the signal in this thread's per-signal pending slot. */
    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
/* uss_addr/uoss_addr are guest addresses of the new/old stack_t (either
 * may be 0); sp is the guest stack pointer, used to decide whether the
 * thread is currently executing on the alternate stack.
 */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    /* Snapshot the old state first, before any modification below. */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

	ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Like the kernel: the stack cannot be changed while in use. */
	ret = -TARGET_EPERM;
	if (on_sig_stack(sp))
            goto out;

	ret = -TARGET_EINVAL;
	if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

	if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
	} else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
	}

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() return target values and host errnos */
/* Emulate sigaction for guest signal 'sig': update the emulated
 * sigact_table and mirror the disposition into the host kernel so that
 * syscall-interruption and ignore semantics match.  act/oact are in
 * guest byte order except for sa_mask (see comments below).
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS stay owned by our exception machinery. */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
805 
806 #if defined(TARGET_MIPS) || defined(TARGET_MIPS64)
807 
808 # if defined(TARGET_ABI_MIPSO32)
/* Guest view of struct sigcontext for the MIPS O32 ABI; the field
 * layout mirrors the kernel's and must not be changed.
 */
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t   pad0;
    uint64_t   sc_mdhi;
    uint64_t   sc_mdlo;
    target_ulong   sc_hi1;         /* Was sc_cause */
    target_ulong   sc_lo1;         /* Was sc_badvaddr */
    target_ulong   sc_hi2;         /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
830 # else /* N32 || N64 */
/* Guest view of struct sigcontext for the MIPS N32/N64 ABIs; layout
 * mirrors the kernel's and must not be changed.
 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
848 # endif /* O32 */

/* Stack frame pushed on the guest stack for a non-RT signal. */
struct sigframe {
    uint32_t sf_ass[4];			/* argument save space for o32 */
    uint32_t sf_code[2];			/* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};

/* Guest view of ucontext_t as embedded in the RT signal frame. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* Stack frame pushed on the guest stack for an RT signal. */
struct target_rt_sigframe {
    uint32_t rs_ass[4];               /* argument save space for o32 */
    uint32_t rs_code[2];              /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};

/* Install trampoline to jump back from signal handler */
/* Writes a two-instruction MIPS sequence at 'tramp' that invokes the
 * given sigreturn syscall number.  Always returns 0; the previous 'err'
 * local was dead (never assigned after initialization), so it has been
 * removed.
 */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     */

    __put_user(0x24020000 + syscall, tramp + 0);   /* li v0, syscall */
    __put_user(0x0000000c          , tramp + 1);   /* syscall */
    return 0;
}

/* Fill the guest sigcontext 'sc' from the current CPU state 'regs',
 * mirroring the kernel's setup_sigcontext() for MIPS.
 */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    /* We are leaving any delay slot; clear the branch state. */
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    /* $0 is hardwired to zero; store it explicitly. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise.  */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
925 
/*
 * Load guest CPU state back from a sigcontext record (inverse of
 * setup_sigcontext); called on sigreturn.
 */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    /* The caller copies CP0_EPC into the PC after we return
     * (see do_sigreturn / do_rt_sigreturn below). */
    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* $zero (index 0) is not restored; it is architecturally constant. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        /* Write back the DSP control fields saved by setup_sigcontext. */
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
956 
957 /*
958  * Determine which stack to use..
959  */
960 static inline abi_ulong
961 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
962 {
963     unsigned long sp;
964 
965     /* Default to using normal stack */
966     sp = regs->active_tc.gpr[29];
967 
968     /*
969      * FPU emulator may have its own trampoline active just
970      * above the user stack, 16-bytes before the next lowest
971      * 16 byte boundary.  Try to avoid trashing it.
972      */
973     sp -= 32;
974 
975     /* This is the X/Open sanctioned signal stack switching.  */
976     if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
977         sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
978     }
979 
980     return (sp - frame_size) & ~7;
981 }
982 
983 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
984 {
985     if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
986         env->hflags &= ~MIPS_HFLAG_M16;
987         env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
988         env->active_tc.PC &= ~(target_ulong) 1;
989     }
990 }
991 
992 # if defined(TARGET_ABI_MIPSO32)
/*
 * Build a non-RT signal frame on the guest stack and point the CPU at
 * the handler; compare linux/arch/mips/kernel/signal.c:setup_frame().
 */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Trampoline returns to the kernel via sigreturn(2). */
    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    /* Record the mask that sigreturn must re-establish. */
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = 0 (should be cause)
     *   a2 = pointer to struct sigcontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sigsegv(sig);
}
1041 
1042 long do_sigreturn(CPUMIPSState *regs)
1043 {
1044     struct sigframe *frame;
1045     abi_ulong frame_addr;
1046     sigset_t blocked;
1047     target_sigset_t target_set;
1048     int i;
1049 
1050     frame_addr = regs->active_tc.gpr[29];
1051     trace_user_do_sigreturn(regs, frame_addr);
1052     if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1053         goto badframe;
1054 
1055     for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1056         __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
1057     }
1058 
1059     target_to_host_sigset_internal(&blocked, &target_set);
1060     set_sigmask(&blocked);
1061 
1062     restore_sigcontext(regs, &frame->sf_sc);
1063 
1064 #if 0
1065     /*
1066      * Don't let your children do this ...
1067      */
1068     __asm__ __volatile__(
1069    	"move\t$29, %0\n\t"
1070    	"j\tsyscall_exit"
1071    	:/* no outputs */
1072    	:"r" (&regs));
1073     /* Unreached */
1074 #endif
1075 
1076     regs->active_tc.PC = regs->CP0_EPC;
1077     mips_set_hflags_isa_mode_from_pc(regs);
1078     /* I am not sure this is right, but it seems to work
1079     * maybe a problem with nested signals ? */
1080     regs->CP0_EPC = 0;
1081     return -TARGET_QEMU_ESIGRETURN;
1082 
1083 badframe:
1084     force_sig(TARGET_SIGSEGV);
1085     return -TARGET_QEMU_ESIGRETURN;
1086 }
1087 # endif /* O32 */
1088 
/*
 * Build an RT signal frame (siginfo + ucontext) on the guest stack and
 * point the CPU at the handler; compare the kernel's setup_rt_frame().
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    /* Trampoline returns to the kernel via rt_sigreturn(2). */
    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    /* Fill in the ucontext: flags/link, signal-stack state, machine
     * context, and the mask to restore on rt_sigreturn. */
    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = pointer to siginfo_t
     *   a2 = pointer to ucontext_t
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    /* NOTE(review): this path is only reached when lock_user_struct()
     * failed, so frame was never locked; the unlock appears harmless
     * but the non-RT setup_frame() above skips it -- confirm. */
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
1150 
/*
 * Handle rt_sigreturn(2) for MIPS: restore the signal mask, the CPU
 * state and the signal-stack settings saved by setup_rt_frame().
 */
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    /* $29 (sp) still points at the rt signal frame. */
    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    /* Re-apply the sigaltstack settings recorded in the ucontext. */
    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    /* Resume the interrupted code at the restored EPC. */
    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
1184 
1185 #elif defined(TARGET_PPC)
1186 
1187 /* Size of dummy stack frame allocated when calling signal handler.
1188    See arch/powerpc/include/asm/ptrace.h.  */
1189 #if defined(TARGET_PPC64)
1190 #define SIGNAL_FRAMESIZE 128
1191 #else
1192 #define SIGNAL_FRAMESIZE 64
1193 #endif
1194 
/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];  /* indexed by the TARGET_PT_* enum below */
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    target_ulong mc_pad[2];     /* reused as the sigreturn trampoline,
                                   via "#define tramp mc_pad" below */
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /* Altivec vector registers.  The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64
        */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};
1241 
/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];    /* _unused[3] carries the high mask word
                                   on ppc32 (see setup_frame) */
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;       /* low word of the saved signal mask */
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};
1256 
1257 /* Indices for target_mcontext.mc_gregs, below.
1258    See arch/powerpc/include/asm/ptrace.h for details.  */
1259 enum {
1260     TARGET_PT_R0 = 0,
1261     TARGET_PT_R1 = 1,
1262     TARGET_PT_R2 = 2,
1263     TARGET_PT_R3 = 3,
1264     TARGET_PT_R4 = 4,
1265     TARGET_PT_R5 = 5,
1266     TARGET_PT_R6 = 6,
1267     TARGET_PT_R7 = 7,
1268     TARGET_PT_R8 = 8,
1269     TARGET_PT_R9 = 9,
1270     TARGET_PT_R10 = 10,
1271     TARGET_PT_R11 = 11,
1272     TARGET_PT_R12 = 12,
1273     TARGET_PT_R13 = 13,
1274     TARGET_PT_R14 = 14,
1275     TARGET_PT_R15 = 15,
1276     TARGET_PT_R16 = 16,
1277     TARGET_PT_R17 = 17,
1278     TARGET_PT_R18 = 18,
1279     TARGET_PT_R19 = 19,
1280     TARGET_PT_R20 = 20,
1281     TARGET_PT_R21 = 21,
1282     TARGET_PT_R22 = 22,
1283     TARGET_PT_R23 = 23,
1284     TARGET_PT_R24 = 24,
1285     TARGET_PT_R25 = 25,
1286     TARGET_PT_R26 = 26,
1287     TARGET_PT_R27 = 27,
1288     TARGET_PT_R28 = 28,
1289     TARGET_PT_R29 = 29,
1290     TARGET_PT_R30 = 30,
1291     TARGET_PT_R31 = 31,
1292     TARGET_PT_NIP = 32,
1293     TARGET_PT_MSR = 33,
1294     TARGET_PT_ORIG_R3 = 34,
1295     TARGET_PT_CTR = 35,
1296     TARGET_PT_LNK = 36,
1297     TARGET_PT_XER = 37,
1298     TARGET_PT_CCR = 38,
1299     /* Yes, there are two registers with #39.  One is 64-bit only.  */
1300     TARGET_PT_MQ = 39,
1301     TARGET_PT_SOFTE = 39,
1302     TARGET_PT_TRAP = 40,
1303     TARGET_PT_DAR = 41,
1304     TARGET_PT_DSISR = 42,
1305     TARGET_PT_RESULT = 43,
1306     TARGET_PT_REGS_COUNT = 44
1307 };
1308 
1309 
1310 struct target_ucontext {
1311     target_ulong tuc_flags;
1312     target_ulong tuc_link;    /* ucontext_t __user * */
1313     struct target_sigaltstack tuc_stack;
1314 #if !defined(TARGET_PPC64)
1315     int32_t tuc_pad[7];
1316     target_ulong tuc_regs;    /* struct mcontext __user *
1317                                 points to uc_mcontext field */
1318 #endif
1319     target_sigset_t tuc_sigmask;
1320 #if defined(TARGET_PPC64)
1321     target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
1322     struct target_sigcontext tuc_sigcontext;
1323 #else
1324     int32_t tuc_maskext[30];
1325     int32_t tuc_pad2[3];
1326     struct target_mcontext tuc_mcontext;
1327 #endif
1328 };
1329 
/* Non-RT signal frame for ppc32.  See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;  /* the handler receives a pointer here */
    struct target_mcontext mctx;    /* saved registers + trampoline */
    int32_t abigap[56];             /* gap matching the kernel's layout */
};
1336 
#if defined(TARGET_PPC64)

/* Number of 32-bit instruction slots reserved for the trampoline. */
#define TARGET_TRAMP_SIZE 6

/* RT signal frame for ppc64. */
struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong  _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc; /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

/* RT signal frame for ppc32. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif
1362 
#if defined(TARGET_PPC64)

/* ELFv1 function descriptor (OPD entry): code entry point plus its TOC
   pointer; used when resolving the signal handler address below. */
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif
1371 
1372 /* We use the mc_pad field for the signal return trampoline.  */
1373 #define tramp mc_pad
1374 
1375 /* See arch/powerpc/kernel/signal.c.  */
1376 static target_ulong get_sigframe(struct target_sigaction *ka,
1377                                  CPUPPCState *env,
1378                                  int frame_size)
1379 {
1380     target_ulong oldsp;
1381 
1382     oldsp = env->gpr[1];
1383 
1384     if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
1385             (sas_ss_flags(oldsp) == 0)) {
1386         oldsp = (target_sigaltstack_used.ss_sp
1387                  + target_sigaltstack_used.ss_size);
1388     }
1389 
1390     return (oldsp - frame_size) & ~0xFUL;
1391 }
1392 
1393 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
1394      (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
1395 #define PPC_VEC_HI      0
1396 #define PPC_VEC_LO      1
1397 #else
1398 #define PPC_VEC_HI      1
1399 #define PPC_VEC_LO      0
1400 #endif
1401 
1402 
1403 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
1404 {
1405     target_ulong msr = env->msr;
1406     int i;
1407     target_ulong ccr = 0;
1408 
1409     /* In general, the kernel attempts to be intelligent about what it
1410        needs to save for Altivec/FP/SPE registers.  We don't care that
1411        much, so we just go ahead and save everything.  */
1412 
1413     /* Save general registers.  */
1414     for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
1415         __put_user(env->gpr[i], &frame->mc_gregs[i]);
1416     }
1417     __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
1418     __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
1419     __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
1420     __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
1421 
1422     for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
1423         ccr |= env->crf[i] << (32 - ((i + 1) * 4));
1424     }
1425     __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
1426 
1427     /* Save Altivec registers if necessary.  */
1428     if (env->insns_flags & PPC_ALTIVEC) {
1429         uint32_t *vrsave;
1430         for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
1431             ppc_avr_t *avr = &env->avr[i];
1432             ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
1433 
1434             __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
1435             __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
1436         }
1437         /* Set MSR_VR in the saved MSR value to indicate that
1438            frame->mc_vregs contains valid data.  */
1439         msr |= MSR_VR;
1440 #if defined(TARGET_PPC64)
1441         vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
1442         /* 64-bit needs to put a pointer to the vectors in the frame */
1443         __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
1444 #else
1445         vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
1446 #endif
1447         __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
1448     }
1449 
1450     /* Save VSX second halves */
1451     if (env->insns_flags2 & PPC2_VSX) {
1452         uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
1453         for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
1454             __put_user(env->vsr[i], &vsregs[i]);
1455         }
1456     }
1457 
1458     /* Save floating point registers.  */
1459     if (env->insns_flags & PPC_FLOAT) {
1460         for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
1461             __put_user(env->fpr[i], &frame->mc_fregs[i]);
1462         }
1463         __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
1464     }
1465 
1466     /* Save SPE registers.  The kernel only saves the high half.  */
1467     if (env->insns_flags & PPC_SPE) {
1468 #if defined(TARGET_PPC64)
1469         for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
1470             __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
1471         }
1472 #else
1473         for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
1474             __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
1475         }
1476 #endif
1477         /* Set MSR_SPE in the saved MSR value to indicate that
1478            frame->mc_vregs contains valid data.  */
1479         msr |= MSR_SPE;
1480         __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
1481     }
1482 
1483     /* Store MSR.  */
1484     __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
1485 }
1486 
1487 static void encode_trampoline(int sigret, uint32_t *tramp)
1488 {
1489     /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
1490     if (sigret) {
1491         __put_user(0x38000000 | sigret, &tramp[0]);
1492         __put_user(0x44000002, &tramp[1]);
1493     }
1494 }
1495 
/*
 * Load guest CPU state back from a target mcontext (inverse of
 * save_user_regs).  When @sig is 0, r2 is preserved across the call
 * and the saved MSR_LE bit is not applied; sigreturn paths pass 1.
 */
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Unpack the packed CCR word back into the eight 4-bit CR fields. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* VRSAVE is stored after the vector registers; its slot differs
           between ppc64 (index 33) and ppc32 (index 32).  */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __get_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}
1597 
1598 #if !defined(TARGET_PPC64)
/*
 * Build a non-RT (ppc32) signal frame on the guest stack and point the
 * CPU at the handler; compare arch/powerpc/kernel/signal_32.c.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    /* The 64-bit mask is split: low word in oldmask, high word in
     * _unused[3] (do_sigreturn reassembles it from there). */
    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler: r3 = signo, r4 = &sctx.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
1656 #endif /* !defined(TARGET_PPC64) */
1657 
/*
 * Build an RT signal frame (siginfo + ucontext) on the guest stack
 * for either ppc32 or ppc64 and point the CPU at the handler.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    /* Needed to pick the ELFv1 vs ELFv2 handler-entry convention. */
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    /* Fill in the ucontext: flags/link, signal-stack state and the
     * mask to restore on rt_sigreturn. */
    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    __put_user((target_ulong)target_sigaltstack_used.ss_sp,
               &rt_sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]),
               &rt_sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &rt_sf->uc.tuc_stack.ss_size);
#if !defined(TARGET_PPC64)
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

    /* Pick where the machine context and trampoline live: inside the
     * embedded sigcontext on ppc64, inside the mcontext on ppc32. */
#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler: r3 = signo, r4 = &info,
     * r5 = &uc, r6 = frame base. */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);

}
1758 
1759 #if !defined(TARGET_PPC64)
1760 long do_sigreturn(CPUPPCState *env)
1761 {
1762     struct target_sigcontext *sc = NULL;
1763     struct target_mcontext *sr = NULL;
1764     target_ulong sr_addr = 0, sc_addr;
1765     sigset_t blocked;
1766     target_sigset_t set;
1767 
1768     sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
1769     if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
1770         goto sigsegv;
1771 
1772 #if defined(TARGET_PPC64)
1773     set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
1774 #else
1775     __get_user(set.sig[0], &sc->oldmask);
1776     __get_user(set.sig[1], &sc->_unused[3]);
1777 #endif
1778     target_to_host_sigset_internal(&blocked, &set);
1779     set_sigmask(&blocked);
1780 
1781     __get_user(sr_addr, &sc->regs);
1782     if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
1783         goto sigsegv;
1784     restore_user_regs(env, sr, 1);
1785 
1786     unlock_user_struct(sr, sr_addr, 1);
1787     unlock_user_struct(sc, sc_addr, 1);
1788     return -TARGET_QEMU_ESIGRETURN;
1789 
1790 sigsegv:
1791     unlock_user_struct(sr, sr_addr, 1);
1792     unlock_user_struct(sc, sc_addr, 1);
1793     force_sig(TARGET_SIGSEGV);
1794     return -TARGET_QEMU_ESIGRETURN;
1795 }
1796 #endif /* !defined(TARGET_PPC64) */
1797 
/* See arch/powerpc/kernel/signal_32.c.  Restore the signal mask and
   machine context from a guest ucontext; returns non-zero on a bad
   user pointer.  @sig is forwarded to restore_user_regs().  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

    /* ppc64 embeds the mcontext in the ucontext; ppc32 stores a guest
     * pointer to it in tuc_regs. */
#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
1827 
1828 long do_rt_sigreturn(CPUPPCState *env)
1829 {
1830     struct target_rt_sigframe *rt_sf = NULL;
1831     target_ulong rt_sf_addr;
1832 
1833     rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
1834     if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
1835         goto sigsegv;
1836 
1837     if (do_setcontext(&rt_sf->uc, env, 1))
1838         goto sigsegv;
1839 
1840     do_sigaltstack(rt_sf_addr
1841                    + offsetof(struct target_rt_sigframe, uc.tuc_stack),
1842                    0, env->gpr[1]);
1843 
1844     unlock_user_struct(rt_sf, rt_sf_addr, 1);
1845     return -TARGET_QEMU_ESIGRETURN;
1846 
1847 sigsegv:
1848     unlock_user_struct(rt_sf, rt_sf_addr, 1);
1849     force_sig(TARGET_SIGSEGV);
1850     return -TARGET_QEMU_ESIGRETURN;
1851 }
1852 #endif
1853 
/*
 * Deliver one pending guest signal: dequeue it, consult the guest's
 * registered sigaction, and either ignore it, perform the default
 * action (stop / ignore / dump core), or build a signal frame on the
 * guest stack so the guest handler runs when the CPU loop resumes.
 *
 * cpu_env - the per-CPU architecture state
 * sig     - target signal number (may be rewritten by gdb_handlesig)
 * k       - the pending-signal slot holding the queued siginfo
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /* Give an attached gdb a chance to intercept/suppress the signal;
     * a return of 0 means gdb consumed it, so treat it as ignored. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signal. The other are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            /* Job-control stop: stop the whole QEMU process like the
             * guest process would stop. */
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
        || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
        || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
        || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
        || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO)
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        else
            setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: one-shot handler, revert to the default action
         * after the first delivery. */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
1942 
/*
 * Main pending-signal drain loop, called from the CPU execution loop
 * whenever ts->signal_pending is set.  Delivers the forced synchronous
 * signal first (if any), then scans all target signals in number order,
 * restarting the scan whenever a delivery may have raised a new
 * synchronous signal.  Host signals are fully blocked while the queue
 * is manipulated, and re-enabled (except SIGSEGV/SIGBUS) at the end of
 * each pass so a newly arriving host signal can set signal_pending
 * again before we leave the loop.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        /* Block every host signal while we inspect/modify the queue. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            /* If the sync signal is blocked or ignored, force it through:
             * unblock it and reset the handler to the default action. */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* During sigsuspend(), deliverability is judged against the
             * temporary sigsuspend mask rather than the normal one. */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* SIGSEGV/SIGBUS must never be blocked at the host level: QEMU
         * itself needs them to detect guest faults. */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
2004