xref: /openbmc/qemu/linux-user/syscall.c (revision 771f3be1b5d6c540c427bc7274ab36e2cccba694)
1  /*
2   *  Linux syscalls
3   *
4   *  Copyright (c) 2003 Fabrice Bellard
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
8   *  the Free Software Foundation; either version 2 of the License, or
9   *  (at your option) any later version.
10   *
11   *  This program is distributed in the hope that it will be useful,
12   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   *  GNU General Public License for more details.
15   *
16   *  You should have received a copy of the GNU General Public License
17   *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18   */
19  #define _ATFILE_SOURCE
20  #include "qemu/osdep.h"
21  #include "qemu/cutils.h"
22  #include "qemu/path.h"
23  #include "qemu/memfd.h"
24  #include "qemu/queue.h"
25  #include <elf.h>
26  #include <endian.h>
27  #include <grp.h>
28  #include <sys/ipc.h>
29  #include <sys/msg.h>
30  #include <sys/wait.h>
31  #include <sys/mount.h>
32  #include <sys/file.h>
33  #include <sys/fsuid.h>
34  #include <sys/personality.h>
35  #include <sys/prctl.h>
36  #include <sys/resource.h>
37  #include <sys/swap.h>
38  #include <linux/capability.h>
39  #include <sched.h>
40  #include <sys/timex.h>
41  #include <sys/socket.h>
42  #include <linux/sockios.h>
43  #include <sys/un.h>
44  #include <sys/uio.h>
45  #include <poll.h>
46  #include <sys/times.h>
47  #include <sys/shm.h>
48  #include <sys/sem.h>
49  #include <sys/statfs.h>
50  #include <utime.h>
51  #include <sys/sysinfo.h>
52  #include <sys/signalfd.h>
53  //#include <sys/user.h>
54  #include <netinet/in.h>
55  #include <netinet/ip.h>
56  #include <netinet/tcp.h>
57  #include <netinet/udp.h>
58  #include <linux/wireless.h>
59  #include <linux/icmp.h>
60  #include <linux/icmpv6.h>
61  #include <linux/if_tun.h>
62  #include <linux/in6.h>
63  #include <linux/errqueue.h>
64  #include <linux/random.h>
65  #ifdef CONFIG_TIMERFD
66  #include <sys/timerfd.h>
67  #endif
68  #ifdef CONFIG_EVENTFD
69  #include <sys/eventfd.h>
70  #endif
71  #ifdef CONFIG_EPOLL
72  #include <sys/epoll.h>
73  #endif
74  #ifdef CONFIG_ATTR
75  #include "qemu/xattr.h"
76  #endif
77  #ifdef CONFIG_SENDFILE
78  #include <sys/sendfile.h>
79  #endif
80  #ifdef HAVE_SYS_KCOV_H
81  #include <sys/kcov.h>
82  #endif
83  
84  #define termios host_termios
85  #define winsize host_winsize
86  #define termio host_termio
87  #define sgttyb host_sgttyb /* same as target */
88  #define tchars host_tchars /* same as target */
89  #define ltchars host_ltchars /* same as target */
90  
91  #include <linux/termios.h>
92  #include <linux/unistd.h>
93  #include <linux/cdrom.h>
94  #include <linux/hdreg.h>
95  #include <linux/soundcard.h>
96  #include <linux/kd.h>
97  #include <linux/mtio.h>
98  #include <linux/fs.h>
99  #include <linux/fd.h>
100  #if defined(CONFIG_FIEMAP)
101  #include <linux/fiemap.h>
102  #endif
103  #include <linux/fb.h>
104  #if defined(CONFIG_USBFS)
105  #include <linux/usbdevice_fs.h>
106  #include <linux/usb/ch9.h>
107  #endif
108  #include <linux/vt.h>
109  #include <linux/dm-ioctl.h>
110  #include <linux/reboot.h>
111  #include <linux/route.h>
112  #include <linux/filter.h>
113  #include <linux/blkpg.h>
114  #include <netpacket/packet.h>
115  #include <linux/netlink.h>
116  #include <linux/if_alg.h>
117  #include <linux/rtc.h>
118  #include <sound/asound.h>
119  #ifdef HAVE_BTRFS_H
120  #include <linux/btrfs.h>
121  #endif
122  #ifdef HAVE_DRM_H
123  #include <libdrm/drm.h>
124  #include <libdrm/i915_drm.h>
125  #endif
126  #include "linux_loop.h"
127  #include "uname.h"
128  
129  #include "qemu.h"
130  #include "qemu/guest-random.h"
131  #include "qemu/selfmap.h"
132  #include "user/syscall-trace.h"
133  #include "qapi/error.h"
134  #include "fd-trans.h"
135  #include "tcg/tcg.h"
136  
137  #ifndef CLONE_IO
138  #define CLONE_IO                0x80000000      /* Clone io context */
139  #endif
140  
141  /* We can't directly call the host clone syscall, because this will
142   * badly confuse libc (breaking mutexes, for example). So we must
143   * divide clone flags into:
144   *  * flag combinations that look like pthread_create()
145   *  * flag combinations that look like fork()
146   *  * flags we can implement within QEMU itself
147   *  * flags we can't support and will return an error for
148   */
149  /* For thread creation, all these flags must be present; for
150   * fork, none must be present.
151   */
152  #define CLONE_THREAD_FLAGS                              \
153      (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154       CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155  
156  /* These flags are ignored:
157   * CLONE_DETACHED is now ignored by the kernel;
158   * CLONE_IO is just an optimisation hint to the I/O scheduler
159   */
160  #define CLONE_IGNORED_FLAGS                     \
161      (CLONE_DETACHED | CLONE_IO)
162  
163  /* Flags for fork which we can implement within QEMU itself */
164  #define CLONE_OPTIONAL_FORK_FLAGS               \
165      (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167  
168  /* Flags for thread creation which we can implement within QEMU itself */
169  #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170      (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172  
173  #define CLONE_INVALID_FORK_FLAGS                                        \
174      (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175  
176  #define CLONE_INVALID_THREAD_FLAGS                                      \
177      (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178         CLONE_IGNORED_FLAGS))
179  
180  /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181   * have almost all been allocated. We cannot support any of
182   * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183   * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184   * The checks against the invalid thread masks above will catch these.
185   * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186   */
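
/*
 * Editorial sketch (hypothetical helpers, not used elsewhere in this file):
 * one way the masks above could be combined to classify a guest clone flag
 * set, roughly mirroring the checks that do_fork() performs.
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* All of the pthread_create()-style flags must be present ... */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           /* ... and nothing outside the supported/ignored sets may be set. */
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}

static inline bool clone_flags_look_like_fork(unsigned int flags)
{
    /* None of the thread-creation flags may be present ... */
    return (flags & CLONE_THREAD_FLAGS) == 0 &&
           /* ... and only emulatable/ignored bits (plus the exit signal). */
           (flags & CLONE_INVALID_FORK_FLAGS) == 0;
}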
187  
188  /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189   * once. This exercises the codepaths for restart.
190   */
191  //#define DEBUG_ERESTARTSYS
192  
193  //#include <linux/msdos_fs.h>
194  #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195  #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196  
197  #undef _syscall0
198  #undef _syscall1
199  #undef _syscall2
200  #undef _syscall3
201  #undef _syscall4
202  #undef _syscall5
203  #undef _syscall6
204  
205  #define _syscall0(type,name)		\
206  static type name (void)			\
207  {					\
208  	return syscall(__NR_##name);	\
209  }
210  
211  #define _syscall1(type,name,type1,arg1)		\
212  static type name (type1 arg1)			\
213  {						\
214  	return syscall(__NR_##name, arg1);	\
215  }
216  
217  #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218  static type name (type1 arg1,type2 arg2)		\
219  {							\
220  	return syscall(__NR_##name, arg1, arg2);	\
221  }
222  
223  #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224  static type name (type1 arg1,type2 arg2,type3 arg3)		\
225  {								\
226  	return syscall(__NR_##name, arg1, arg2, arg3);		\
227  }
228  
229  #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231  {										\
232  	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233  }
234  
235  #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236  		  type5,arg5)							\
237  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238  {										\
239  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240  }
241  
242  
243  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244  		  type5,arg5,type6,arg6)					\
245  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                    type6 arg6)							\
247  {										\
248  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249  }
250  
251  
252  #define __NR_sys_uname __NR_uname
253  #define __NR_sys_getcwd1 __NR_getcwd
254  #define __NR_sys_getdents __NR_getdents
255  #define __NR_sys_getdents64 __NR_getdents64
256  #define __NR_sys_getpriority __NR_getpriority
257  #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258  #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259  #define __NR_sys_syslog __NR_syslog
260  #if defined(__NR_futex)
261  # define __NR_sys_futex __NR_futex
262  #endif
263  #if defined(__NR_futex_time64)
264  # define __NR_sys_futex_time64 __NR_futex_time64
265  #endif
266  #define __NR_sys_inotify_init __NR_inotify_init
267  #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268  #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269  #define __NR_sys_statx __NR_statx
270  
271  #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272  #define __NR__llseek __NR_lseek
273  #endif
274  
275  /* Newer kernel ports have llseek() instead of _llseek() */
276  #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277  #define TARGET_NR__llseek TARGET_NR_llseek
278  #endif
279  
280  /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281  #ifndef TARGET_O_NONBLOCK_MASK
282  #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283  #endif
284  
285  #define __NR_sys_gettid __NR_gettid
286  _syscall0(int, sys_gettid)
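
/*
 * Editorial note: the invocation above expands, via the _syscall0() macro,
 * to roughly
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 *
 * i.e. a direct host syscall with no libc wrapper in between (some libcs
 * historically provided no gettid() wrapper at all).
 */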
287  
288  /* For the 64-bit guest on 32-bit host case we must emulate
289   * getdents using getdents64, because otherwise the host
290   * might hand us back more dirent records than we can fit
291   * into the guest buffer after structure format conversion.
292   * Otherwise we implement guest getdents using host getdents if the host has it.
293   */
294  #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295  #define EMULATE_GETDENTS_WITH_GETDENTS
296  #endif
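
/*
 * Editorial sketch of why this matters: with a 64-bit guest on a 32-bit
 * host, the host's struct linux_dirent carries 32-bit d_ino/d_off fields
 * while the guest expects 64-bit ones, so each record grows by roughly
 * 8 bytes during conversion; a guest buffer the host filled completely
 * could then no longer hold all of the converted records.  Using the
 * host's getdents64 (fixed 64-bit fields) avoids that mismatch.
 */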
297  
298  #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299  _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300  #endif
301  #if (defined(TARGET_NR_getdents) && \
302        !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303      (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304  _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305  #endif
306  #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307  _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308            loff_t *, res, uint, wh);
309  #endif
310  _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311  _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312            siginfo_t *, uinfo)
313  _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314  #ifdef __NR_exit_group
315  _syscall1(int,exit_group,int,error_code)
316  #endif
317  #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318  _syscall1(int,set_tid_address,int *,tidptr)
319  #endif
320  #if defined(__NR_futex)
321  _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322            const struct timespec *,timeout,int *,uaddr2,int,val3)
323  #endif
324  #if defined(__NR_futex_time64)
325  _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326            const struct timespec *,timeout,int *,uaddr2,int,val3)
327  #endif
328  #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329  _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330            unsigned long *, user_mask_ptr);
331  #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332  _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333            unsigned long *, user_mask_ptr);
334  #define __NR_sys_getcpu __NR_getcpu
335  _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336  _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337            void *, arg);
338  _syscall2(int, capget, struct __user_cap_header_struct *, header,
339            struct __user_cap_data_struct *, data);
340  _syscall2(int, capset, struct __user_cap_header_struct *, header,
341            struct __user_cap_data_struct *, data);
342  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343  _syscall2(int, ioprio_get, int, which, int, who)
344  #endif
345  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346  _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347  #endif
348  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349  _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350  #endif
351  
352  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353  _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354            unsigned long, idx1, unsigned long, idx2)
355  #endif
356  
357  /*
358   * It is assumed that struct statx is architecture independent.
359   */
360  #if defined(TARGET_NR_statx) && defined(__NR_statx)
361  _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362            unsigned int, mask, struct target_statx *, statxbuf)
363  #endif
364  #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365  _syscall2(int, membarrier, int, cmd, int, flags)
366  #endif
367  
368  static const bitmask_transtbl fcntl_flags_tbl[] = {
369    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382  #if defined(O_DIRECT)
383    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384  #endif
385  #if defined(O_NOATIME)
386    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387  #endif
388  #if defined(O_CLOEXEC)
389    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390  #endif
391  #if defined(O_PATH)
392    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393  #endif
394  #if defined(O_TMPFILE)
395    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396  #endif
397    /* Don't terminate the list prematurely on 64-bit host+guest.  */
398  #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400  #endif
401    { 0, 0, 0, 0 }
402  };
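
/*
 * Editorial sketch (usage illustration): each row of the table is
 * (target_mask, target_bits, host_mask, host_bits), so multi-bit fields
 * such as O_ACCMODE and single-bit flags share one format.  The table is
 * consumed by the generic translation helpers declared elsewhere in
 * linux-user, along the lines of
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 */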
403  
404  _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405  
406  #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407  #if defined(__NR_utimensat)
408  #define __NR_sys_utimensat __NR_utimensat
409  _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410            const struct timespec *,tsp,int,flags)
411  #else
412  static int sys_utimensat(int dirfd, const char *pathname,
413                           const struct timespec times[2], int flags)
414  {
415      errno = ENOSYS;
416      return -1;
417  }
418  #endif
419  #endif /* TARGET_NR_utimensat */
420  
421  #ifdef TARGET_NR_renameat2
422  #if defined(__NR_renameat2)
423  #define __NR_sys_renameat2 __NR_renameat2
424  _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425            const char *, new, unsigned int, flags)
426  #else
427  static int sys_renameat2(int oldfd, const char *old,
428                           int newfd, const char *new, int flags)
429  {
430      if (flags == 0) {
431          return renameat(oldfd, old, newfd, new);
432      }
433      errno = ENOSYS;
434      return -1;
435  }
436  #endif
437  #endif /* TARGET_NR_renameat2 */
438  
439  #ifdef CONFIG_INOTIFY
440  #include <sys/inotify.h>
441  
442  #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443  static int sys_inotify_init(void)
444  {
445    return (inotify_init());
446  }
447  #endif
448  #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449  static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450  {
451    return (inotify_add_watch(fd, pathname, mask));
452  }
453  #endif
454  #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455  static int sys_inotify_rm_watch(int fd, int32_t wd)
456  {
457    return (inotify_rm_watch(fd, wd));
458  }
459  #endif
460  #ifdef CONFIG_INOTIFY1
461  #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462  static int sys_inotify_init1(int flags)
463  {
464    return (inotify_init1(flags));
465  }
466  #endif
467  #endif
468  #else
469  /* Userspace can usually survive runtime without inotify */
470  #undef TARGET_NR_inotify_init
471  #undef TARGET_NR_inotify_init1
472  #undef TARGET_NR_inotify_add_watch
473  #undef TARGET_NR_inotify_rm_watch
474  #endif /* CONFIG_INOTIFY  */
475  
476  #if defined(TARGET_NR_prlimit64)
477  #ifndef __NR_prlimit64
478  # define __NR_prlimit64 -1
479  #endif
480  #define __NR_sys_prlimit64 __NR_prlimit64
481  /* The glibc rlimit structure may not match the one used by the underlying syscall */
482  struct host_rlimit64 {
483      uint64_t rlim_cur;
484      uint64_t rlim_max;
485  };
486  _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487            const struct host_rlimit64 *, new_limit,
488            struct host_rlimit64 *, old_limit)
489  #endif
490  
491  
492  #if defined(TARGET_NR_timer_create)
493  /* Maximum of 32 active POSIX timers allowed at any one time. */
494  static timer_t g_posix_timers[32] = { 0, };
495  
496  static inline int next_free_host_timer(void)
497  {
498      int k;
499      /* FIXME: Does finding the next free slot require a lock? */
500      for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501          if (g_posix_timers[k] == 0) {
502              g_posix_timers[k] = (timer_t) 1;
503              return k;
504          }
505      }
506      return -1;
507  }
508  #endif
509  
510  #define ERRNO_TABLE_SIZE 1200
511  
512  /* target_to_host_errno_table[] is initialized from
513   * host_to_target_errno_table[] in syscall_init(). */
514  static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515  };
516  
517  /*
518   * This list is the union of errno values overridden in asm-<arch>/errno.h
519   * minus the errnos that are not actually generic to all archs.
520   */
521  static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522      [EAGAIN]		= TARGET_EAGAIN,
523      [EIDRM]		= TARGET_EIDRM,
524      [ECHRNG]		= TARGET_ECHRNG,
525      [EL2NSYNC]		= TARGET_EL2NSYNC,
526      [EL3HLT]		= TARGET_EL3HLT,
527      [EL3RST]		= TARGET_EL3RST,
528      [ELNRNG]		= TARGET_ELNRNG,
529      [EUNATCH]		= TARGET_EUNATCH,
530      [ENOCSI]		= TARGET_ENOCSI,
531      [EL2HLT]		= TARGET_EL2HLT,
532      [EDEADLK]		= TARGET_EDEADLK,
533      [ENOLCK]		= TARGET_ENOLCK,
534      [EBADE]		= TARGET_EBADE,
535      [EBADR]		= TARGET_EBADR,
536      [EXFULL]		= TARGET_EXFULL,
537      [ENOANO]		= TARGET_ENOANO,
538      [EBADRQC]		= TARGET_EBADRQC,
539      [EBADSLT]		= TARGET_EBADSLT,
540      [EBFONT]		= TARGET_EBFONT,
541      [ENOSTR]		= TARGET_ENOSTR,
542      [ENODATA]		= TARGET_ENODATA,
543      [ETIME]		= TARGET_ETIME,
544      [ENOSR]		= TARGET_ENOSR,
545      [ENONET]		= TARGET_ENONET,
546      [ENOPKG]		= TARGET_ENOPKG,
547      [EREMOTE]		= TARGET_EREMOTE,
548      [ENOLINK]		= TARGET_ENOLINK,
549      [EADV]		= TARGET_EADV,
550      [ESRMNT]		= TARGET_ESRMNT,
551      [ECOMM]		= TARGET_ECOMM,
552      [EPROTO]		= TARGET_EPROTO,
553      [EDOTDOT]		= TARGET_EDOTDOT,
554      [EMULTIHOP]		= TARGET_EMULTIHOP,
555      [EBADMSG]		= TARGET_EBADMSG,
556      [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
557      [EOVERFLOW]		= TARGET_EOVERFLOW,
558      [ENOTUNIQ]		= TARGET_ENOTUNIQ,
559      [EBADFD]		= TARGET_EBADFD,
560      [EREMCHG]		= TARGET_EREMCHG,
561      [ELIBACC]		= TARGET_ELIBACC,
562      [ELIBBAD]		= TARGET_ELIBBAD,
563      [ELIBSCN]		= TARGET_ELIBSCN,
564      [ELIBMAX]		= TARGET_ELIBMAX,
565      [ELIBEXEC]		= TARGET_ELIBEXEC,
566      [EILSEQ]		= TARGET_EILSEQ,
567      [ENOSYS]		= TARGET_ENOSYS,
568      [ELOOP]		= TARGET_ELOOP,
569      [ERESTART]		= TARGET_ERESTART,
570      [ESTRPIPE]		= TARGET_ESTRPIPE,
571      [ENOTEMPTY]		= TARGET_ENOTEMPTY,
572      [EUSERS]		= TARGET_EUSERS,
573      [ENOTSOCK]		= TARGET_ENOTSOCK,
574      [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
575      [EMSGSIZE]		= TARGET_EMSGSIZE,
576      [EPROTOTYPE]	= TARGET_EPROTOTYPE,
577      [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
578      [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
579      [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
580      [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
581      [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
582      [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
583      [EADDRINUSE]	= TARGET_EADDRINUSE,
584      [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
585      [ENETDOWN]		= TARGET_ENETDOWN,
586      [ENETUNREACH]	= TARGET_ENETUNREACH,
587      [ENETRESET]		= TARGET_ENETRESET,
588      [ECONNABORTED]	= TARGET_ECONNABORTED,
589      [ECONNRESET]	= TARGET_ECONNRESET,
590      [ENOBUFS]		= TARGET_ENOBUFS,
591      [EISCONN]		= TARGET_EISCONN,
592      [ENOTCONN]		= TARGET_ENOTCONN,
593      [EUCLEAN]		= TARGET_EUCLEAN,
594      [ENOTNAM]		= TARGET_ENOTNAM,
595      [ENAVAIL]		= TARGET_ENAVAIL,
596      [EISNAM]		= TARGET_EISNAM,
597      [EREMOTEIO]		= TARGET_EREMOTEIO,
598      [EDQUOT]            = TARGET_EDQUOT,
599      [ESHUTDOWN]		= TARGET_ESHUTDOWN,
600      [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
601      [ETIMEDOUT]		= TARGET_ETIMEDOUT,
602      [ECONNREFUSED]	= TARGET_ECONNREFUSED,
603      [EHOSTDOWN]		= TARGET_EHOSTDOWN,
604      [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
605      [EALREADY]		= TARGET_EALREADY,
606      [EINPROGRESS]	= TARGET_EINPROGRESS,
607      [ESTALE]		= TARGET_ESTALE,
608      [ECANCELED]		= TARGET_ECANCELED,
609      [ENOMEDIUM]		= TARGET_ENOMEDIUM,
610      [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
611  #ifdef ENOKEY
612      [ENOKEY]		= TARGET_ENOKEY,
613  #endif
614  #ifdef EKEYEXPIRED
615      [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
616  #endif
617  #ifdef EKEYREVOKED
618      [EKEYREVOKED]	= TARGET_EKEYREVOKED,
619  #endif
620  #ifdef EKEYREJECTED
621      [EKEYREJECTED]	= TARGET_EKEYREJECTED,
622  #endif
623  #ifdef EOWNERDEAD
624      [EOWNERDEAD]	= TARGET_EOWNERDEAD,
625  #endif
626  #ifdef ENOTRECOVERABLE
627      [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
628  #endif
629  #ifdef ENOMSG
630      [ENOMSG]            = TARGET_ENOMSG,
631  #endif
632  #ifdef ERFKILL
633      [ERFKILL]           = TARGET_ERFKILL,
634  #endif
635  #ifdef EHWPOISON
636      [EHWPOISON]         = TARGET_EHWPOISON,
637  #endif
638  };
639  
640  static inline int host_to_target_errno(int err)
641  {
642      if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643          host_to_target_errno_table[err]) {
644          return host_to_target_errno_table[err];
645      }
646      return err;
647  }
648  
649  static inline int target_to_host_errno(int err)
650  {
651      if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652          target_to_host_errno_table[err]) {
653          return target_to_host_errno_table[err];
654      }
655      return err;
656  }
657  
658  static inline abi_long get_errno(abi_long ret)
659  {
660      if (ret == -1)
661          return -host_to_target_errno(errno);
662      else
663          return ret;
664  }
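
/*
 * Editorial sketch (illustration only): the intended calling pattern is to
 * wrap a host call directly, e.g.
 *
 *     ret = get_errno(read(fd, host_buf, count));
 *
 * A failing host call (-1 with errno == EAGAIN, say) then reaches the guest
 * as -TARGET_EAGAIN, translated through host_to_target_errno_table[].
 */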
665  
666  const char *target_strerror(int err)
667  {
668      if (err == TARGET_ERESTARTSYS) {
669          return "To be restarted";
670      }
671      if (err == TARGET_QEMU_ESIGRETURN) {
672          return "Successful exit from sigreturn";
673      }
674  
675      if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676          return NULL;
677      }
678      return strerror(target_to_host_errno(err));
679  }
680  
681  #define safe_syscall0(type, name) \
682  static type safe_##name(void) \
683  { \
684      return safe_syscall(__NR_##name); \
685  }
686  
687  #define safe_syscall1(type, name, type1, arg1) \
688  static type safe_##name(type1 arg1) \
689  { \
690      return safe_syscall(__NR_##name, arg1); \
691  }
692  
693  #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694  static type safe_##name(type1 arg1, type2 arg2) \
695  { \
696      return safe_syscall(__NR_##name, arg1, arg2); \
697  }
698  
699  #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700  static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701  { \
702      return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703  }
704  
705  #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706      type4, arg4) \
707  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708  { \
709      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710  }
711  
712  #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713      type4, arg4, type5, arg5) \
714  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715      type5 arg5) \
716  { \
717      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718  }
719  
720  #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721      type4, arg4, type5, arg5, type6, arg6) \
722  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723      type5 arg5, type6 arg6) \
724  { \
725      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726  }
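
/*
 * Editorial note: each safe_syscallN() invocation below generates a
 * safe_<name>() wrapper that is used just like the _syscallN wrappers,
 * e.g.
 *
 *     ret = get_errno(safe_read(fd, host_buf, count));
 *
 * except that the syscall is entered through safe_syscall(), which is
 * written to cooperate with the guest signal/restart handling.
 */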
727  
728  safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729  safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730  safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731                int, flags, mode_t, mode)
732  #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733  safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734                struct rusage *, rusage)
735  #endif
736  safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737                int, options, struct rusage *, rusage)
738  safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741  safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742                fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743  #endif
744  #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745  safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746                struct timespec *, tsp, const sigset_t *, sigmask,
747                size_t, sigsetsize)
748  #endif
749  safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750                int, maxevents, int, timeout, const sigset_t *, sigmask,
751                size_t, sigsetsize)
752  #if defined(__NR_futex)
753  safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754                const struct timespec *,timeout,int *,uaddr2,int,val3)
755  #endif
756  #if defined(__NR_futex_time64)
757  safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758                const struct timespec *,timeout,int *,uaddr2,int,val3)
759  #endif
760  safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761  safe_syscall2(int, kill, pid_t, pid, int, sig)
762  safe_syscall2(int, tkill, int, tid, int, sig)
763  safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764  safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765  safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766  safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767                unsigned long, pos_l, unsigned long, pos_h)
768  safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769                unsigned long, pos_l, unsigned long, pos_h)
770  safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771                socklen_t, addrlen)
772  safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773                int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774  safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775                int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776  safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777  safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778  safe_syscall2(int, flock, int, fd, int, operation)
779  #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780  safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781                const struct timespec *, uts, size_t, sigsetsize)
782  #endif
783  safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784                int, flags)
785  #if defined(TARGET_NR_nanosleep)
786  safe_syscall2(int, nanosleep, const struct timespec *, req,
787                struct timespec *, rem)
788  #endif
789  #if defined(TARGET_NR_clock_nanosleep) || \
790      defined(TARGET_NR_clock_nanosleep_time64)
791  safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792                const struct timespec *, req, struct timespec *, rem)
793  #endif
794  #ifdef __NR_ipc
795  #ifdef __s390x__
796  safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797                void *, ptr)
798  #else
799  safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800                void *, ptr, long, fifth)
801  #endif
802  #endif
803  #ifdef __NR_msgsnd
804  safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805                int, flags)
806  #endif
807  #ifdef __NR_msgrcv
808  safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809                long, msgtype, int, flags)
810  #endif
811  #ifdef __NR_semtimedop
812  safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813                unsigned, nsops, const struct timespec *, timeout)
814  #endif
815  #if defined(TARGET_NR_mq_timedsend) || \
816      defined(TARGET_NR_mq_timedsend_time64)
817  safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818                size_t, len, unsigned, prio, const struct timespec *, timeout)
819  #endif
820  #if defined(TARGET_NR_mq_timedreceive) || \
821      defined(TARGET_NR_mq_timedreceive_time64)
822  safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823                size_t, len, unsigned *, prio, const struct timespec *, timeout)
824  #endif
825  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826  safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827                int, outfd, loff_t *, poutoff, size_t, length,
828                unsigned int, flags)
829  #endif
830  
831  /* We do ioctl like this rather than via safe_syscall3 to preserve the
832   * "third argument might be integer or pointer or not present" behaviour of
833   * the libc function.
834   */
835  #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836  /* Similarly for fcntl. Note that callers must always:
837   *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838   *  - use the flock64 struct rather than the unsuffixed flock
839   * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840   */
841  #ifdef __NR_fcntl64
842  #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843  #else
844  #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845  #endif
846  
847  static inline int host_to_target_sock_type(int host_type)
848  {
849      int target_type;
850  
851      switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852      case SOCK_DGRAM:
853          target_type = TARGET_SOCK_DGRAM;
854          break;
855      case SOCK_STREAM:
856          target_type = TARGET_SOCK_STREAM;
857          break;
858      default:
859          target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860          break;
861      }
862  
863  #if defined(SOCK_CLOEXEC)
864      if (host_type & SOCK_CLOEXEC) {
865          target_type |= TARGET_SOCK_CLOEXEC;
866      }
867  #endif
868  
869  #if defined(SOCK_NONBLOCK)
870      if (host_type & SOCK_NONBLOCK) {
871          target_type |= TARGET_SOCK_NONBLOCK;
872      }
873  #endif
874  
875      return target_type;
876  }
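
/*
 * Editorial example: host_to_target_sock_type(SOCK_STREAM | SOCK_NONBLOCK)
 * yields TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK (when the host defines
 * SOCK_NONBLOCK), whatever numeric values the target ABI assigns to them.
 */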
877  
878  static abi_ulong target_brk;
879  static abi_ulong target_original_brk;
880  static abi_ulong brk_page;
881  
882  void target_set_brk(abi_ulong new_brk)
883  {
884      target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885      brk_page = HOST_PAGE_ALIGN(target_brk);
886  }
887  
888  //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889  #define DEBUGF_BRK(message, args...)
890  
891  /* do_brk() must return target values and target errnos. */
892  abi_long do_brk(abi_ulong new_brk)
893  {
894      abi_long mapped_addr;
895      abi_ulong new_alloc_size;
896  
897      /* brk pointers are always untagged */
898  
899      DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
900  
901      if (!new_brk) {
902          DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
903          return target_brk;
904      }
905      if (new_brk < target_original_brk) {
906          DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
907                     target_brk);
908          return target_brk;
909      }
910  
911      /* If the new brk is less than the highest page reserved to the
912       * target heap allocation, set it and we're almost done...  */
913      if (new_brk <= brk_page) {
914          /* Heap contents are initialized to zero, as for anonymous
915           * mapped pages.  */
916          if (new_brk > target_brk) {
917              memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
918          }
919          target_brk = new_brk;
920          DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
921          return target_brk;
922      }
923  
924      /* We need to allocate more memory after the brk... Note that
925       * we don't use MAP_FIXED because that will map over the top of
926       * any existing mapping (like the one with the host libc or qemu
927       * itself); instead we treat "mapped but at wrong address" as
928       * a failure and unmap again.
929       */
930      new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
931      mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
932                                          PROT_READ|PROT_WRITE,
933                                          MAP_ANON|MAP_PRIVATE, 0, 0));
934  
935      if (mapped_addr == brk_page) {
936          /* Heap contents are initialized to zero, as for anonymous
937           * mapped pages.  Technically the new pages are already
938           * initialized to zero since they *are* anonymous mapped
939           * pages, however we have to take care with the contents that
940           * come from the remaining part of the previous page: it may
941           * contain garbage data from a previous heap usage (grown
942           * then shrunk).  */
943          memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
944  
945          target_brk = new_brk;
946          brk_page = HOST_PAGE_ALIGN(target_brk);
947          DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
948              target_brk);
949          return target_brk;
950      } else if (mapped_addr != -1) {
951          /* Mapped but at wrong address, meaning there wasn't actually
952           * enough space for this brk.
953           */
954          target_munmap(mapped_addr, new_alloc_size);
955          mapped_addr = -1;
956          DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
957      }
958      else {
959          DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
960      }
961  
962  #if defined(TARGET_ALPHA)
963      /* We (partially) emulate OSF/1 on Alpha, which requires we
964         return a proper errno, not an unchanged brk value.  */
965      return -TARGET_ENOMEM;
966  #endif
967      /* For everything else, return the previous break. */
968      return target_brk;
969  }
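
/*
 * Editorial worked example (assuming a 4 KiB host page size): with
 * target_brk = 0x40001234 and brk_page = 0x40002000, a guest brk(0x40005000)
 * takes the mmap path: new_alloc_size = HOST_PAGE_ALIGN(0x3000) = 0x3000,
 * and target_mmap() is asked for that much at brk_page.  If the mapping
 * lands exactly at brk_page, target_brk becomes 0x40005000 and brk_page
 * 0x40005000; if it lands anywhere else, it is unmapped again and the old
 * break is returned (or -TARGET_ENOMEM on Alpha).
 */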
970  
971  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973  static inline abi_long copy_from_user_fdset(fd_set *fds,
974                                              abi_ulong target_fds_addr,
975                                              int n)
976  {
977      int i, nw, j, k;
978      abi_ulong b, *target_fds;
979  
980      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981      if (!(target_fds = lock_user(VERIFY_READ,
982                                   target_fds_addr,
983                                   sizeof(abi_ulong) * nw,
984                                   1)))
985          return -TARGET_EFAULT;
986  
987      FD_ZERO(fds);
988      k = 0;
989      for (i = 0; i < nw; i++) {
990          /* grab the abi_ulong */
991          __get_user(b, &target_fds[i]);
992          for (j = 0; j < TARGET_ABI_BITS; j++) {
993              /* check the bit inside the abi_ulong */
994              if ((b >> j) & 1)
995                  FD_SET(k, fds);
996              k++;
997          }
998      }
999  
1000      unlock_user(target_fds, target_fds_addr, 0);
1001  
1002      return 0;
1003  }
1004  
1005  static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1006                                                   abi_ulong target_fds_addr,
1007                                                   int n)
1008  {
1009      if (target_fds_addr) {
1010          if (copy_from_user_fdset(fds, target_fds_addr, n))
1011              return -TARGET_EFAULT;
1012          *fds_ptr = fds;
1013      } else {
1014          *fds_ptr = NULL;
1015      }
1016      return 0;
1017  }
1018  
1019  static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1020                                            const fd_set *fds,
1021                                            int n)
1022  {
1023      int i, nw, j, k;
1024      abi_long v;
1025      abi_ulong *target_fds;
1026  
1027      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1028      if (!(target_fds = lock_user(VERIFY_WRITE,
1029                                   target_fds_addr,
1030                                   sizeof(abi_ulong) * nw,
1031                                   0)))
1032          return -TARGET_EFAULT;
1033  
1034      k = 0;
1035      for (i = 0; i < nw; i++) {
1036          v = 0;
1037          for (j = 0; j < TARGET_ABI_BITS; j++) {
1038              v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1039              k++;
1040          }
1041          __put_user(v, &target_fds[i]);
1042      }
1043  
1044      unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1045  
1046      return 0;
1047  }
1048  #endif
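
/*
 * Editorial worked example of the bit packing above: with
 * TARGET_ABI_BITS == 32 and n == 40, nw is 2 abi_ulongs; guest fd 35 is
 * stored in target_fds[1], bit 3 (35 = 1 * 32 + 3), independent of the
 * host's FD_SET layout.
 */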
1049  
1050  #if defined(__alpha__)
1051  #define HOST_HZ 1024
1052  #else
1053  #define HOST_HZ 100
1054  #endif
1055  
1056  static inline abi_long host_to_target_clock_t(long ticks)
1057  {
1058  #if HOST_HZ == TARGET_HZ
1059      return ticks;
1060  #else
1061      return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1062  #endif
1063  }
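
/*
 * Editorial example: on an Alpha host (HOST_HZ == 1024) emulating a target
 * with TARGET_HZ == 100, host_to_target_clock_t(2048) returns
 * 2048 * 100 / 1024 = 200 target clock ticks.
 */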
1064  
1065  static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1066                                               const struct rusage *rusage)
1067  {
1068      struct target_rusage *target_rusage;
1069  
1070      if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1071          return -TARGET_EFAULT;
1072      target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1073      target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1074      target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1075      target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1076      target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1077      target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1078      target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1079      target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1080      target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1081      target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1082      target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1083      target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1084      target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1085      target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1086      target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1087      target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1088      target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1089      target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1090      unlock_user_struct(target_rusage, target_addr, 1);
1091  
1092      return 0;
1093  }
1094  
1095  #ifdef TARGET_NR_setrlimit
1096  static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1097  {
1098      abi_ulong target_rlim_swap;
1099      rlim_t result;
1100  
1101      target_rlim_swap = tswapal(target_rlim);
1102      if (target_rlim_swap == TARGET_RLIM_INFINITY)
1103          return RLIM_INFINITY;
1104  
1105      result = target_rlim_swap;
1106      if (target_rlim_swap != (rlim_t)result)
1107          return RLIM_INFINITY;
1108  
1109      return result;
1110  }
1111  #endif
1112  
1113  #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1114  static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1115  {
1116      abi_ulong target_rlim_swap;
1117      abi_ulong result;
1118  
1119      if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1120          target_rlim_swap = TARGET_RLIM_INFINITY;
1121      else
1122          target_rlim_swap = rlim;
1123      result = tswapal(target_rlim_swap);
1124  
1125      return result;
1126  }
1127  #endif
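
/*
 * Editorial note: the check above is deliberately conservative.  Any host
 * rlim_t value that does not survive the round trip through abi_long
 * ("rlim != (abi_long)rlim") is reported to the guest as
 * TARGET_RLIM_INFINITY rather than being silently truncated.
 */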
1128  
1129  static inline int target_to_host_resource(int code)
1130  {
1131      switch (code) {
1132      case TARGET_RLIMIT_AS:
1133          return RLIMIT_AS;
1134      case TARGET_RLIMIT_CORE:
1135          return RLIMIT_CORE;
1136      case TARGET_RLIMIT_CPU:
1137          return RLIMIT_CPU;
1138      case TARGET_RLIMIT_DATA:
1139          return RLIMIT_DATA;
1140      case TARGET_RLIMIT_FSIZE:
1141          return RLIMIT_FSIZE;
1142      case TARGET_RLIMIT_LOCKS:
1143          return RLIMIT_LOCKS;
1144      case TARGET_RLIMIT_MEMLOCK:
1145          return RLIMIT_MEMLOCK;
1146      case TARGET_RLIMIT_MSGQUEUE:
1147          return RLIMIT_MSGQUEUE;
1148      case TARGET_RLIMIT_NICE:
1149          return RLIMIT_NICE;
1150      case TARGET_RLIMIT_NOFILE:
1151          return RLIMIT_NOFILE;
1152      case TARGET_RLIMIT_NPROC:
1153          return RLIMIT_NPROC;
1154      case TARGET_RLIMIT_RSS:
1155          return RLIMIT_RSS;
1156      case TARGET_RLIMIT_RTPRIO:
1157          return RLIMIT_RTPRIO;
1158      case TARGET_RLIMIT_SIGPENDING:
1159          return RLIMIT_SIGPENDING;
1160      case TARGET_RLIMIT_STACK:
1161          return RLIMIT_STACK;
1162      default:
1163          return code;
1164      }
1165  }
1166  
1167  static inline abi_long copy_from_user_timeval(struct timeval *tv,
1168                                                abi_ulong target_tv_addr)
1169  {
1170      struct target_timeval *target_tv;
1171  
1172      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173          return -TARGET_EFAULT;
1174      }
1175  
1176      __get_user(tv->tv_sec, &target_tv->tv_sec);
1177      __get_user(tv->tv_usec, &target_tv->tv_usec);
1178  
1179      unlock_user_struct(target_tv, target_tv_addr, 0);
1180  
1181      return 0;
1182  }
1183  
1184  static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1185                                              const struct timeval *tv)
1186  {
1187      struct target_timeval *target_tv;
1188  
1189      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1190          return -TARGET_EFAULT;
1191      }
1192  
1193      __put_user(tv->tv_sec, &target_tv->tv_sec);
1194      __put_user(tv->tv_usec, &target_tv->tv_usec);
1195  
1196      unlock_user_struct(target_tv, target_tv_addr, 1);
1197  
1198      return 0;
1199  }
1200  
1201  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1202  static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1203                                                  abi_ulong target_tv_addr)
1204  {
1205      struct target__kernel_sock_timeval *target_tv;
1206  
1207      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1208          return -TARGET_EFAULT;
1209      }
1210  
1211      __get_user(tv->tv_sec, &target_tv->tv_sec);
1212      __get_user(tv->tv_usec, &target_tv->tv_usec);
1213  
1214      unlock_user_struct(target_tv, target_tv_addr, 0);
1215  
1216      return 0;
1217  }
1218  #endif
1219  
1220  static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1221                                                const struct timeval *tv)
1222  {
1223      struct target__kernel_sock_timeval *target_tv;
1224  
1225      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1226          return -TARGET_EFAULT;
1227      }
1228  
1229      __put_user(tv->tv_sec, &target_tv->tv_sec);
1230      __put_user(tv->tv_usec, &target_tv->tv_usec);
1231  
1232      unlock_user_struct(target_tv, target_tv_addr, 1);
1233  
1234      return 0;
1235  }
1236  
1237  #if defined(TARGET_NR_futex) || \
1238      defined(TARGET_NR_rt_sigtimedwait) || \
1239      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1240      defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1241      defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1242      defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1243      defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1244      defined(TARGET_NR_timer_settime) || \
1245      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1246  static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1247                                                 abi_ulong target_addr)
1248  {
1249      struct target_timespec *target_ts;
1250  
1251      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1252          return -TARGET_EFAULT;
1253      }
1254      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1255      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256      unlock_user_struct(target_ts, target_addr, 0);
1257      return 0;
1258  }
1259  #endif
1260  
1261  #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262      defined(TARGET_NR_timer_settime64) || \
1263      defined(TARGET_NR_mq_timedsend_time64) || \
1264      defined(TARGET_NR_mq_timedreceive_time64) || \
1265      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266      defined(TARGET_NR_clock_nanosleep_time64) || \
1267      defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268      defined(TARGET_NR_utimensat) || \
1269      defined(TARGET_NR_utimensat_time64) || \
1270      defined(TARGET_NR_semtimedop_time64) || \
1271      defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1272  static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1273                                                   abi_ulong target_addr)
1274  {
1275      struct target__kernel_timespec *target_ts;
1276  
1277      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1278          return -TARGET_EFAULT;
1279      }
1280      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1281      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1282      /* In 32-bit mode this drops the padding (upper bits) of the 64-bit tv_nsec. */
1283      host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1284      unlock_user_struct(target_ts, target_addr, 0);
1285      return 0;
1286  }
1287  #endif
1288  
1289  static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1290                                                 struct timespec *host_ts)
1291  {
1292      struct target_timespec *target_ts;
1293  
1294      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1295          return -TARGET_EFAULT;
1296      }
1297      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1298      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1299      unlock_user_struct(target_ts, target_addr, 1);
1300      return 0;
1301  }
1302  
1303  static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1304                                                   struct timespec *host_ts)
1305  {
1306      struct target__kernel_timespec *target_ts;
1307  
1308      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1309          return -TARGET_EFAULT;
1310      }
1311      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1312      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1313      unlock_user_struct(target_ts, target_addr, 1);
1314      return 0;
1315  }
1316  
1317  #if defined(TARGET_NR_gettimeofday)
1318  static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1319                                               struct timezone *tz)
1320  {
1321      struct target_timezone *target_tz;
1322  
1323      if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1324          return -TARGET_EFAULT;
1325      }
1326  
1327      __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1328      __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1329  
1330      unlock_user_struct(target_tz, target_tz_addr, 1);
1331  
1332      return 0;
1333  }
1334  #endif
1335  
1336  #if defined(TARGET_NR_settimeofday)
1337  static inline abi_long copy_from_user_timezone(struct timezone *tz,
1338                                                 abi_ulong target_tz_addr)
1339  {
1340      struct target_timezone *target_tz;
1341  
1342      if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1343          return -TARGET_EFAULT;
1344      }
1345  
1346      __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1347      __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1348  
1349      unlock_user_struct(target_tz, target_tz_addr, 0);
1350  
1351      return 0;
1352  }
1353  #endif
1354  
1355  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356  #include <mqueue.h>
1357  
1358  static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1359                                                abi_ulong target_mq_attr_addr)
1360  {
1361      struct target_mq_attr *target_mq_attr;
1362  
1363      if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1364                            target_mq_attr_addr, 1))
1365          return -TARGET_EFAULT;
1366  
1367      __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1368      __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1369      __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1370      __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371  
1372      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1373  
1374      return 0;
1375  }
1376  
1377  static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1378                                              const struct mq_attr *attr)
1379  {
1380      struct target_mq_attr *target_mq_attr;
1381  
1382      if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1383                            target_mq_attr_addr, 0))
1384          return -TARGET_EFAULT;
1385  
1386      __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1387      __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1388      __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1389      __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1390  
1391      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1392  
1393      return 0;
1394  }
1395  #endif
1396  
1397  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398  /* do_select() must return target values and target errnos. */
1399  static abi_long do_select(int n,
1400                            abi_ulong rfd_addr, abi_ulong wfd_addr,
1401                            abi_ulong efd_addr, abi_ulong target_tv_addr)
1402  {
1403      fd_set rfds, wfds, efds;
1404      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1405      struct timeval tv;
1406      struct timespec ts, *ts_ptr;
1407      abi_long ret;
1408  
1409      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410      if (ret) {
1411          return ret;
1412      }
1413      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414      if (ret) {
1415          return ret;
1416      }
1417      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418      if (ret) {
1419          return ret;
1420      }
1421  
1422      if (target_tv_addr) {
1423          if (copy_from_user_timeval(&tv, target_tv_addr))
1424              return -TARGET_EFAULT;
1425          ts.tv_sec = tv.tv_sec;
1426          ts.tv_nsec = tv.tv_usec * 1000;
1427          ts_ptr = &ts;
1428      } else {
1429          ts_ptr = NULL;
1430      }
1431  
1432      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433                                    ts_ptr, NULL));
1434  
1435      if (!is_error(ret)) {
1436          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1437              return -TARGET_EFAULT;
1438          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1439              return -TARGET_EFAULT;
1440          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1441              return -TARGET_EFAULT;
1442  
1443          if (target_tv_addr) {
1444              tv.tv_sec = ts.tv_sec;
1445              tv.tv_usec = ts.tv_nsec / 1000;
1446              if (copy_to_user_timeval(target_tv_addr, &tv)) {
1447                  return -TARGET_EFAULT;
1448              }
1449          }
1450      }
1451  
1452      return ret;
1453  }
1454  
1455  #if defined(TARGET_WANT_OLD_SYS_SELECT)
1456  static abi_long do_old_select(abi_ulong arg1)
1457  {
1458      struct target_sel_arg_struct *sel;
1459      abi_ulong inp, outp, exp, tvp;
1460      long nsel;
1461  
1462      if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1463          return -TARGET_EFAULT;
1464      }
1465  
1466      nsel = tswapal(sel->n);
1467      inp = tswapal(sel->inp);
1468      outp = tswapal(sel->outp);
1469      exp = tswapal(sel->exp);
1470      tvp = tswapal(sel->tvp);
1471  
1472      unlock_user_struct(sel, arg1, 0);
1473  
1474      return do_select(nsel, inp, outp, exp, tvp);
1475  }
1476  #endif
1477  #endif
1478  
1479  #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1480  static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1481                              abi_long arg4, abi_long arg5, abi_long arg6,
1482                              bool time64)
1483  {
1484      abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1485      fd_set rfds, wfds, efds;
1486      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1487      struct timespec ts, *ts_ptr;
1488      abi_long ret;
1489  
1490      /*
1491       * The 6th arg is actually two args smashed together,
1492       * so we cannot use the C library.
1493       */
1494      sigset_t set;
1495      struct {
1496          sigset_t *set;
1497          size_t size;
1498      } sig, *sig_ptr;
1499  
1500      abi_ulong arg_sigset, arg_sigsize, *arg7;
1501      target_sigset_t *target_sigset;
1502  
1503      n = arg1;
1504      rfd_addr = arg2;
1505      wfd_addr = arg3;
1506      efd_addr = arg4;
1507      ts_addr = arg5;
1508  
1509      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1510      if (ret) {
1511          return ret;
1512      }
1513      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1514      if (ret) {
1515          return ret;
1516      }
1517      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1518      if (ret) {
1519          return ret;
1520      }
1521  
1522      /*
1523       * This takes a timespec, and not a timeval, so we cannot
1524       * use the do_select() helper ...
1525       */
1526      if (ts_addr) {
1527          if (time64) {
1528              if (target_to_host_timespec64(&ts, ts_addr)) {
1529                  return -TARGET_EFAULT;
1530              }
1531          } else {
1532              if (target_to_host_timespec(&ts, ts_addr)) {
1533                  return -TARGET_EFAULT;
1534              }
1535          }
1536          ts_ptr = &ts;
1537      } else {
1538          ts_ptr = NULL;
1539      }
1540  
1541      /* Extract the two packed args for the sigset */
1542      if (arg6) {
1543          sig_ptr = &sig;
1544          sig.size = SIGSET_T_SIZE;
1545  
1546          arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1547          if (!arg7) {
1548              return -TARGET_EFAULT;
1549          }
1550          arg_sigset = tswapal(arg7[0]);
1551          arg_sigsize = tswapal(arg7[1]);
1552          unlock_user(arg7, arg6, 0);
1553  
1554          if (arg_sigset) {
1555              sig.set = &set;
1556              if (arg_sigsize != sizeof(*target_sigset)) {
1557                  /* Like the kernel, we enforce correct size sigsets */
1558                  return -TARGET_EINVAL;
1559              }
1560              target_sigset = lock_user(VERIFY_READ, arg_sigset,
1561                                        sizeof(*target_sigset), 1);
1562              if (!target_sigset) {
1563                  return -TARGET_EFAULT;
1564              }
1565              target_to_host_sigset(&set, target_sigset);
1566              unlock_user(target_sigset, arg_sigset, 0);
1567          } else {
1568              sig.set = NULL;
1569          }
1570      } else {
1571          sig_ptr = NULL;
1572      }
1573  
1574      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1575                                    ts_ptr, sig_ptr));
1576  
1577      if (!is_error(ret)) {
1578          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1579              return -TARGET_EFAULT;
1580          }
1581          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1582              return -TARGET_EFAULT;
1583          }
1584          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1585              return -TARGET_EFAULT;
1586          }
1587          if (time64) {
1588              if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1589                  return -TARGET_EFAULT;
1590              }
1591          } else {
1592              if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1593                  return -TARGET_EFAULT;
1594              }
1595          }
1596      }
1597      return ret;
1598  }
1599  #endif
1600  
1601  #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602      defined(TARGET_NR_ppoll_time64)
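/*
 * Shared implementation of poll(), ppoll() and ppoll_time64. For plain
 * poll (ppoll == false) arg3 is a timeout in milliseconds; for ppoll it
 * is a guest timespec and arg4/arg5 supply the signal mask and its size.
 */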
1603  static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1604                           abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1605  {
1606      struct target_pollfd *target_pfd;
1607      unsigned int nfds = arg2;
1608      struct pollfd *pfd;
1609      unsigned int i;
1610      abi_long ret;
1611  
1612      pfd = NULL;
1613      target_pfd = NULL;
1614      if (nfds) {
1615          if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1616              return -TARGET_EINVAL;
1617          }
1618          target_pfd = lock_user(VERIFY_WRITE, arg1,
1619                                 sizeof(struct target_pollfd) * nfds, 1);
1620          if (!target_pfd) {
1621              return -TARGET_EFAULT;
1622          }
1623  
1624          pfd = alloca(sizeof(struct pollfd) * nfds);
1625          for (i = 0; i < nfds; i++) {
1626              pfd[i].fd = tswap32(target_pfd[i].fd);
1627              pfd[i].events = tswap16(target_pfd[i].events);
1628          }
1629      }
1630      if (ppoll) {
1631          struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1632          target_sigset_t *target_set;
1633          sigset_t _set, *set = &_set;
1634  
1635          if (arg3) {
1636              if (time64) {
1637                  if (target_to_host_timespec64(timeout_ts, arg3)) {
1638                      unlock_user(target_pfd, arg1, 0);
1639                      return -TARGET_EFAULT;
1640                  }
1641              } else {
1642                  if (target_to_host_timespec(timeout_ts, arg3)) {
1643                      unlock_user(target_pfd, arg1, 0);
1644                      return -TARGET_EFAULT;
1645                  }
1646              }
1647          } else {
1648              timeout_ts = NULL;
1649          }
1650  
1651          if (arg4) {
1652              if (arg5 != sizeof(target_sigset_t)) {
1653                  unlock_user(target_pfd, arg1, 0);
1654                  return -TARGET_EINVAL;
1655              }
1656  
1657              target_set = lock_user(VERIFY_READ, arg4,
1658                                     sizeof(target_sigset_t), 1);
1659              if (!target_set) {
1660                  unlock_user(target_pfd, arg1, 0);
1661                  return -TARGET_EFAULT;
1662              }
1663              target_to_host_sigset(set, target_set);
1664          } else {
1665              set = NULL;
1666          }
1667  
1668          ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1669                                     set, SIGSET_T_SIZE));
1670  
1671          if (!is_error(ret) && arg3) {
1672              if (time64) {
1673                  if (host_to_target_timespec64(arg3, timeout_ts)) {
1674                      return -TARGET_EFAULT;
1675                  }
1676              } else {
1677                  if (host_to_target_timespec(arg3, timeout_ts)) {
1678                      return -TARGET_EFAULT;
1679                  }
1680              }
1681          }
1682          if (arg4) {
1683              unlock_user(target_set, arg4, 0);
1684          }
1685      } else {
1686          struct timespec ts, *pts;
1687  
1688          if (arg3 >= 0) {
1689              /* Convert ms to secs, ns */
1690              ts.tv_sec = arg3 / 1000;
1691              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1692              pts = &ts;
1693          } else {
1694              /* -ve poll() timeout means "infinite" */
1695              pts = NULL;
1696          }
1697          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1698      }
1699  
1700      if (!is_error(ret)) {
1701          for (i = 0; i < nfds; i++) {
1702              target_pfd[i].revents = tswap16(pfd[i].revents);
1703          }
1704      }
1705      unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1706      return ret;
1707  }
1708  #endif
1709  
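/* Only call the host pipe2() when it is available; otherwise fail with ENOSYS. */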
1710  static abi_long do_pipe2(int host_pipe[], int flags)
1711  {
1712  #ifdef CONFIG_PIPE2
1713      return pipe2(host_pipe, flags);
1714  #else
1715      return -ENOSYS;
1716  #endif
1717  }
1718  
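/*
 * Create the pipe and return the two descriptors to the guest: via the
 * target-specific register convention for the original pipe syscall on
 * some architectures, otherwise by storing both fds at 'pipedes'.
 */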
1719  static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1720                          int flags, int is_pipe2)
1721  {
1722      int host_pipe[2];
1723      abi_long ret;
1724      ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1725  
1726      if (is_error(ret))
1727          return get_errno(ret);
1728  
1729      /* Several targets have special calling conventions for the original
1730         pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1731      if (!is_pipe2) {
1732  #if defined(TARGET_ALPHA)
1733          ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1734          return host_pipe[0];
1735  #elif defined(TARGET_MIPS)
1736          ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1737          return host_pipe[0];
1738  #elif defined(TARGET_SH4)
1739          ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1740          return host_pipe[0];
1741  #elif defined(TARGET_SPARC)
1742          ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1743          return host_pipe[0];
1744  #endif
1745      }
1746  
1747      if (put_user_s32(host_pipe[0], pipedes)
1748          || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1749          return -TARGET_EFAULT;
1750      return get_errno(ret);
1751  }
1752  
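/*
 * Copy a guest ip_mreq/ip_mreqn into a host ip_mreqn; the interface index
 * is only filled in (and byteswapped) when the larger mreqn layout is used.
 */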
1753  static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1754                                                abi_ulong target_addr,
1755                                                socklen_t len)
1756  {
1757      struct target_ip_mreqn *target_smreqn;
1758  
1759      target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1760      if (!target_smreqn)
1761          return -TARGET_EFAULT;
1762      mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1763      mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1764      if (len == sizeof(struct target_ip_mreqn))
1765          mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1766      unlock_user(target_smreqn, target_addr, 0);
1767  
1768      return 0;
1769  }
1770  
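/*
 * Convert a guest sockaddr into host format. A per-fd address translator,
 * if one is registered, takes precedence; otherwise the address family is
 * byteswapped and AF_UNIX, AF_NETLINK and AF_PACKET addresses get their
 * family-specific fixups.
 */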
1771  static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1772                                                 abi_ulong target_addr,
1773                                                 socklen_t len)
1774  {
1775      const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1776      sa_family_t sa_family;
1777      struct target_sockaddr *target_saddr;
1778  
1779      if (fd_trans_target_to_host_addr(fd)) {
1780          return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1781      }
1782  
1783      target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1784      if (!target_saddr)
1785          return -TARGET_EFAULT;
1786  
1787      sa_family = tswap16(target_saddr->sa_family);
1788  
1789      /* Oops. The caller might send an incomplete sun_path; sun_path
1790       * must be terminated by \0 (see the manual page), but
1791       * unfortunately it is quite common to specify the sockaddr_un
1792       * length as "strlen(x->sun_path)" when it should be
1793       * "strlen(...) + 1". We fix that up here if needed;
1794       * the Linux kernel has a similar feature.
1795       */
1796  
1797      if (sa_family == AF_UNIX) {
1798          if (len < unix_maxlen && len > 0) {
1799              char *cp = (char*)target_saddr;
1800  
1801              if (cp[len - 1] && !cp[len])
1802                  len++;
1803          }
1804          if (len > unix_maxlen)
1805              len = unix_maxlen;
1806      }
1807  
1808      memcpy(addr, target_saddr, len);
1809      addr->sa_family = sa_family;
1810      if (sa_family == AF_NETLINK) {
1811          struct sockaddr_nl *nladdr;
1812  
1813          nladdr = (struct sockaddr_nl *)addr;
1814          nladdr->nl_pid = tswap32(nladdr->nl_pid);
1815          nladdr->nl_groups = tswap32(nladdr->nl_groups);
1816      } else if (sa_family == AF_PACKET) {
1817          struct target_sockaddr_ll *lladdr;
1818  
1819          lladdr = (struct target_sockaddr_ll *)addr;
1820          lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1821          lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1822      }
1823      unlock_user(target_saddr, target_addr, 0);
1824  
1825      return 0;
1826  }
1827  
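/*
 * Copy a host sockaddr back into guest memory, byteswapping the address
 * family and the family-specific fields of AF_NETLINK, AF_PACKET and
 * AF_INET6 addresses.
 */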
1828  static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1829                                                 struct sockaddr *addr,
1830                                                 socklen_t len)
1831  {
1832      struct target_sockaddr *target_saddr;
1833  
1834      if (len == 0) {
1835          return 0;
1836      }
1837      assert(addr);
1838  
1839      target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1840      if (!target_saddr)
1841          return -TARGET_EFAULT;
1842      memcpy(target_saddr, addr, len);
1843      if (len >= offsetof(struct target_sockaddr, sa_family) +
1844          sizeof(target_saddr->sa_family)) {
1845          target_saddr->sa_family = tswap16(addr->sa_family);
1846      }
1847      if (addr->sa_family == AF_NETLINK &&
1848          len >= sizeof(struct target_sockaddr_nl)) {
1849          struct target_sockaddr_nl *target_nl =
1850                 (struct target_sockaddr_nl *)target_saddr;
1851          target_nl->nl_pid = tswap32(target_nl->nl_pid);
1852          target_nl->nl_groups = tswap32(target_nl->nl_groups);
1853      } else if (addr->sa_family == AF_PACKET) {
1854          struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1855          target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1856          target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1857      } else if (addr->sa_family == AF_INET6 &&
1858                 len >= sizeof(struct target_sockaddr_in6)) {
1859          struct target_sockaddr_in6 *target_in6 =
1860                 (struct target_sockaddr_in6 *)target_saddr;
1861          target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1862      }
1863      unlock_user(target_saddr, target_addr, len);
1864  
1865      return 0;
1866  }
1867  
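/*
 * Convert the guest control-message chain attached to target_msgh into
 * host cmsgs in msgh's control buffer. SCM_RIGHTS descriptors and
 * SCM_CREDENTIALS are converted; other payload types are logged as
 * unsupported and copied through verbatim.
 */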
1868  static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1869                                             struct target_msghdr *target_msgh)
1870  {
1871      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872      abi_long msg_controllen;
1873      abi_ulong target_cmsg_addr;
1874      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875      socklen_t space = 0;
1876  
1877      msg_controllen = tswapal(target_msgh->msg_controllen);
1878      if (msg_controllen < sizeof (struct target_cmsghdr))
1879          goto the_end;
1880      target_cmsg_addr = tswapal(target_msgh->msg_control);
1881      target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1882      target_cmsg_start = target_cmsg;
1883      if (!target_cmsg)
1884          return -TARGET_EFAULT;
1885  
1886      while (cmsg && target_cmsg) {
1887          void *data = CMSG_DATA(cmsg);
1888          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889  
1890          int len = tswapal(target_cmsg->cmsg_len)
1891              - sizeof(struct target_cmsghdr);
1892  
1893          space += CMSG_SPACE(len);
1894          if (space > msgh->msg_controllen) {
1895              space -= CMSG_SPACE(len);
1896              /* This is a QEMU bug, since we allocated the payload
1897               * area ourselves (unlike overflow in host-to-target
1898               * conversion, which is just the guest giving us a buffer
1899               * that's too small). It can't happen for the payload types
1900               * we currently support; if it becomes an issue in future
1901               * we would need to improve our allocation strategy to
1902               * something more intelligent than "twice the size of the
1903               * target buffer we're reading from".
1904               */
1905              qemu_log_mask(LOG_UNIMP,
1906                            ("Unsupported ancillary data %d/%d: "
1907                             "unhandled msg size\n"),
1908                            tswap32(target_cmsg->cmsg_level),
1909                            tswap32(target_cmsg->cmsg_type));
1910              break;
1911          }
1912  
1913          if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1914              cmsg->cmsg_level = SOL_SOCKET;
1915          } else {
1916              cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1917          }
1918          cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1919          cmsg->cmsg_len = CMSG_LEN(len);
1920  
1921          if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1922              int *fd = (int *)data;
1923              int *target_fd = (int *)target_data;
1924              int i, numfds = len / sizeof(int);
1925  
1926              for (i = 0; i < numfds; i++) {
1927                  __get_user(fd[i], target_fd + i);
1928              }
1929          } else if (cmsg->cmsg_level == SOL_SOCKET
1930                 &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1931              struct ucred *cred = (struct ucred *)data;
1932              struct target_ucred *target_cred =
1933                  (struct target_ucred *)target_data;
1934  
1935              __get_user(cred->pid, &target_cred->pid);
1936              __get_user(cred->uid, &target_cred->uid);
1937              __get_user(cred->gid, &target_cred->gid);
1938          } else {
1939              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1940                            cmsg->cmsg_level, cmsg->cmsg_type);
1941              memcpy(data, target_data, len);
1942          }
1943  
1944          cmsg = CMSG_NXTHDR(msgh, cmsg);
1945          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1946                                           target_cmsg_start);
1947      }
1948      unlock_user(target_cmsg, target_cmsg_addr, 0);
1949   the_end:
1950      msgh->msg_controllen = space;
1951      return 0;
1952  }
1953  
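/*
 * Convert host control messages back into guest format. Payloads that do
 * not fit are truncated and reported to the guest via MSG_CTRUNC;
 * SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS and the IP/IPv6 error-queue
 * payloads are converted, anything else is copied through with a warning.
 */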
1954  static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1955                                             struct msghdr *msgh)
1956  {
1957      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1958      abi_long msg_controllen;
1959      abi_ulong target_cmsg_addr;
1960      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1961      socklen_t space = 0;
1962  
1963      msg_controllen = tswapal(target_msgh->msg_controllen);
1964      if (msg_controllen < sizeof (struct target_cmsghdr))
1965          goto the_end;
1966      target_cmsg_addr = tswapal(target_msgh->msg_control);
1967      target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1968      target_cmsg_start = target_cmsg;
1969      if (!target_cmsg)
1970          return -TARGET_EFAULT;
1971  
1972      while (cmsg && target_cmsg) {
1973          void *data = CMSG_DATA(cmsg);
1974          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1975  
1976          int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1977          int tgt_len, tgt_space;
1978  
1979          /* We never copy a half-header but may copy half-data;
1980           * this is Linux's behaviour in put_cmsg(). Note that
1981           * truncation here is a guest problem (which we report
1982           * to the guest via the CTRUNC bit), unlike truncation
1983           * in target_to_host_cmsg, which is a QEMU bug.
1984           */
1985          if (msg_controllen < sizeof(struct target_cmsghdr)) {
1986              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1987              break;
1988          }
1989  
1990          if (cmsg->cmsg_level == SOL_SOCKET) {
1991              target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1992          } else {
1993              target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1994          }
1995          target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1996  
1997          /* Payload types which need a different size of payload on
1998           * the target must adjust tgt_len here.
1999           */
2000          tgt_len = len;
2001          switch (cmsg->cmsg_level) {
2002          case SOL_SOCKET:
2003              switch (cmsg->cmsg_type) {
2004              case SO_TIMESTAMP:
2005                  tgt_len = sizeof(struct target_timeval);
2006                  break;
2007              default:
2008                  break;
2009              }
2010              break;
2011          default:
2012              break;
2013          }
2014  
2015          if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2016              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2017              tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2018          }
2019  
2020          /* We must now copy-and-convert len bytes of payload
2021           * into tgt_len bytes of destination space. Bear in mind
2022           * that in both source and destination we may be dealing
2023           * with a truncated value!
2024           */
2025          switch (cmsg->cmsg_level) {
2026          case SOL_SOCKET:
2027              switch (cmsg->cmsg_type) {
2028              case SCM_RIGHTS:
2029              {
2030                  int *fd = (int *)data;
2031                  int *target_fd = (int *)target_data;
2032                  int i, numfds = tgt_len / sizeof(int);
2033  
2034                  for (i = 0; i < numfds; i++) {
2035                      __put_user(fd[i], target_fd + i);
2036                  }
2037                  break;
2038              }
2039              case SO_TIMESTAMP:
2040              {
2041                  struct timeval *tv = (struct timeval *)data;
2042                  struct target_timeval *target_tv =
2043                      (struct target_timeval *)target_data;
2044  
2045                  if (len != sizeof(struct timeval) ||
2046                      tgt_len != sizeof(struct target_timeval)) {
2047                      goto unimplemented;
2048                  }
2049  
2050                  /* copy struct timeval to target */
2051                  __put_user(tv->tv_sec, &target_tv->tv_sec);
2052                  __put_user(tv->tv_usec, &target_tv->tv_usec);
2053                  break;
2054              }
2055              case SCM_CREDENTIALS:
2056              {
2057                  struct ucred *cred = (struct ucred *)data;
2058                  struct target_ucred *target_cred =
2059                      (struct target_ucred *)target_data;
2060  
2061                  __put_user(cred->pid, &target_cred->pid);
2062                  __put_user(cred->uid, &target_cred->uid);
2063                  __put_user(cred->gid, &target_cred->gid);
2064                  break;
2065              }
2066              default:
2067                  goto unimplemented;
2068              }
2069              break;
2070  
2071          case SOL_IP:
2072              switch (cmsg->cmsg_type) {
2073              case IP_TTL:
2074              {
2075                  uint32_t *v = (uint32_t *)data;
2076                  uint32_t *t_int = (uint32_t *)target_data;
2077  
2078                  if (len != sizeof(uint32_t) ||
2079                      tgt_len != sizeof(uint32_t)) {
2080                      goto unimplemented;
2081                  }
2082                  __put_user(*v, t_int);
2083                  break;
2084              }
2085              case IP_RECVERR:
2086              {
2087                  struct errhdr_t {
2088                     struct sock_extended_err ee;
2089                     struct sockaddr_in offender;
2090                  };
2091                  struct errhdr_t *errh = (struct errhdr_t *)data;
2092                  struct errhdr_t *target_errh =
2093                      (struct errhdr_t *)target_data;
2094  
2095                  if (len != sizeof(struct errhdr_t) ||
2096                      tgt_len != sizeof(struct errhdr_t)) {
2097                      goto unimplemented;
2098                  }
2099                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2100                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2101                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2102                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2103                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2104                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2105                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2106                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
2107                      (void *) &errh->offender, sizeof(errh->offender));
2108                  break;
2109              }
2110              default:
2111                  goto unimplemented;
2112              }
2113              break;
2114  
2115          case SOL_IPV6:
2116              switch (cmsg->cmsg_type) {
2117              case IPV6_HOPLIMIT:
2118              {
2119                  uint32_t *v = (uint32_t *)data;
2120                  uint32_t *t_int = (uint32_t *)target_data;
2121  
2122                  if (len != sizeof(uint32_t) ||
2123                      tgt_len != sizeof(uint32_t)) {
2124                      goto unimplemented;
2125                  }
2126                  __put_user(*v, t_int);
2127                  break;
2128              }
2129              case IPV6_RECVERR:
2130              {
2131                  struct errhdr6_t {
2132                     struct sock_extended_err ee;
2133                     struct sockaddr_in6 offender;
2134                  };
2135                  struct errhdr6_t *errh = (struct errhdr6_t *)data;
2136                  struct errhdr6_t *target_errh =
2137                      (struct errhdr6_t *)target_data;
2138  
2139                  if (len != sizeof(struct errhdr6_t) ||
2140                      tgt_len != sizeof(struct errhdr6_t)) {
2141                      goto unimplemented;
2142                  }
2143                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2144                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2145                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2146                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2147                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2148                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2149                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2150                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
2151                      (void *) &errh->offender, sizeof(errh->offender));
2152                  break;
2153              }
2154              default:
2155                  goto unimplemented;
2156              }
2157              break;
2158  
2159          default:
2160          unimplemented:
2161              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2162                            cmsg->cmsg_level, cmsg->cmsg_type);
2163              memcpy(target_data, data, MIN(len, tgt_len));
2164              if (tgt_len > len) {
2165                  memset(target_data + len, 0, tgt_len - len);
2166              }
2167          }
2168  
2169          target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2170          tgt_space = TARGET_CMSG_SPACE(tgt_len);
2171          if (msg_controllen < tgt_space) {
2172              tgt_space = msg_controllen;
2173          }
2174          msg_controllen -= tgt_space;
2175          space += tgt_space;
2176          cmsg = CMSG_NXTHDR(msgh, cmsg);
2177          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2178                                           target_cmsg_start);
2179      }
2180      unlock_user(target_cmsg, target_cmsg_addr, space);
2181   the_end:
2182      target_msgh->msg_controllen = tswapal(space);
2183      return 0;
2184  }
2185  
2186  /* do_setsockopt() must return target values and target errnos. */
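/*
 * Option values are converted from guest to host representation per
 * level/optname; options we do not know how to convert are logged and
 * fail with -TARGET_ENOPROTOOPT.
 */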
2187  static abi_long do_setsockopt(int sockfd, int level, int optname,
2188                                abi_ulong optval_addr, socklen_t optlen)
2189  {
2190      abi_long ret;
2191      int val;
2192      struct ip_mreqn *ip_mreq;
2193      struct ip_mreq_source *ip_mreq_source;
2194  
2195      switch(level) {
2196      case SOL_TCP:
2197      case SOL_UDP:
2198          /* TCP and UDP options all take an 'int' value.  */
2199          if (optlen < sizeof(uint32_t))
2200              return -TARGET_EINVAL;
2201  
2202          if (get_user_u32(val, optval_addr))
2203              return -TARGET_EFAULT;
2204          ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2205          break;
2206      case SOL_IP:
2207          switch(optname) {
2208          case IP_TOS:
2209          case IP_TTL:
2210          case IP_HDRINCL:
2211          case IP_ROUTER_ALERT:
2212          case IP_RECVOPTS:
2213          case IP_RETOPTS:
2214          case IP_PKTINFO:
2215          case IP_MTU_DISCOVER:
2216          case IP_RECVERR:
2217          case IP_RECVTTL:
2218          case IP_RECVTOS:
2219  #ifdef IP_FREEBIND
2220          case IP_FREEBIND:
2221  #endif
2222          case IP_MULTICAST_TTL:
2223          case IP_MULTICAST_LOOP:
2224              val = 0;
2225              if (optlen >= sizeof(uint32_t)) {
2226                  if (get_user_u32(val, optval_addr))
2227                      return -TARGET_EFAULT;
2228              } else if (optlen >= 1) {
2229                  if (get_user_u8(val, optval_addr))
2230                      return -TARGET_EFAULT;
2231              }
2232              ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2233              break;
2234          case IP_ADD_MEMBERSHIP:
2235          case IP_DROP_MEMBERSHIP:
2236              if (optlen < sizeof (struct target_ip_mreq) ||
2237                  optlen > sizeof (struct target_ip_mreqn))
2238                  return -TARGET_EINVAL;
2239  
2240              ip_mreq = (struct ip_mreqn *) alloca(optlen);
2241              target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2242              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2243              break;
2244  
2245          case IP_BLOCK_SOURCE:
2246          case IP_UNBLOCK_SOURCE:
2247          case IP_ADD_SOURCE_MEMBERSHIP:
2248          case IP_DROP_SOURCE_MEMBERSHIP:
2249              if (optlen != sizeof (struct target_ip_mreq_source))
2250                  return -TARGET_EINVAL;
2251  
2252              ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2253              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2254              unlock_user(ip_mreq_source, optval_addr, 0);
2255              break;
2256  
2257          default:
2258              goto unimplemented;
2259          }
2260          break;
2261      case SOL_IPV6:
2262          switch (optname) {
2263          case IPV6_MTU_DISCOVER:
2264          case IPV6_MTU:
2265          case IPV6_V6ONLY:
2266          case IPV6_RECVPKTINFO:
2267          case IPV6_UNICAST_HOPS:
2268          case IPV6_MULTICAST_HOPS:
2269          case IPV6_MULTICAST_LOOP:
2270          case IPV6_RECVERR:
2271          case IPV6_RECVHOPLIMIT:
2272          case IPV6_2292HOPLIMIT:
2273          case IPV6_CHECKSUM:
2274          case IPV6_ADDRFORM:
2275          case IPV6_2292PKTINFO:
2276          case IPV6_RECVTCLASS:
2277          case IPV6_RECVRTHDR:
2278          case IPV6_2292RTHDR:
2279          case IPV6_RECVHOPOPTS:
2280          case IPV6_2292HOPOPTS:
2281          case IPV6_RECVDSTOPTS:
2282          case IPV6_2292DSTOPTS:
2283          case IPV6_TCLASS:
2284          case IPV6_ADDR_PREFERENCES:
2285  #ifdef IPV6_RECVPATHMTU
2286          case IPV6_RECVPATHMTU:
2287  #endif
2288  #ifdef IPV6_TRANSPARENT
2289          case IPV6_TRANSPARENT:
2290  #endif
2291  #ifdef IPV6_FREEBIND
2292          case IPV6_FREEBIND:
2293  #endif
2294  #ifdef IPV6_RECVORIGDSTADDR
2295          case IPV6_RECVORIGDSTADDR:
2296  #endif
2297              val = 0;
2298              if (optlen < sizeof(uint32_t)) {
2299                  return -TARGET_EINVAL;
2300              }
2301              if (get_user_u32(val, optval_addr)) {
2302                  return -TARGET_EFAULT;
2303              }
2304              ret = get_errno(setsockopt(sockfd, level, optname,
2305                                         &val, sizeof(val)));
2306              break;
2307          case IPV6_PKTINFO:
2308          {
2309              struct in6_pktinfo pki;
2310  
2311              if (optlen < sizeof(pki)) {
2312                  return -TARGET_EINVAL;
2313              }
2314  
2315              if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2316                  return -TARGET_EFAULT;
2317              }
2318  
2319              pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2320  
2321              ret = get_errno(setsockopt(sockfd, level, optname,
2322                                         &pki, sizeof(pki)));
2323              break;
2324          }
2325          case IPV6_ADD_MEMBERSHIP:
2326          case IPV6_DROP_MEMBERSHIP:
2327          {
2328              struct ipv6_mreq ipv6mreq;
2329  
2330              if (optlen < sizeof(ipv6mreq)) {
2331                  return -TARGET_EINVAL;
2332              }
2333  
2334              if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2335                  return -TARGET_EFAULT;
2336              }
2337  
2338              ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2339  
2340              ret = get_errno(setsockopt(sockfd, level, optname,
2341                                         &ipv6mreq, sizeof(ipv6mreq)));
2342              break;
2343          }
2344          default:
2345              goto unimplemented;
2346          }
2347          break;
2348      case SOL_ICMPV6:
2349          switch (optname) {
2350          case ICMPV6_FILTER:
2351          {
2352              struct icmp6_filter icmp6f;
2353  
2354              if (optlen > sizeof(icmp6f)) {
2355                  optlen = sizeof(icmp6f);
2356              }
2357  
2358              if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2359                  return -TARGET_EFAULT;
2360              }
2361  
2362              for (val = 0; val < 8; val++) {
2363                  icmp6f.data[val] = tswap32(icmp6f.data[val]);
2364              }
2365  
2366              ret = get_errno(setsockopt(sockfd, level, optname,
2367                                         &icmp6f, optlen));
2368              break;
2369          }
2370          default:
2371              goto unimplemented;
2372          }
2373          break;
2374      case SOL_RAW:
2375          switch (optname) {
2376          case ICMP_FILTER:
2377          case IPV6_CHECKSUM:
2378              /* These take a u32 value. */
2379              if (optlen < sizeof(uint32_t)) {
2380                  return -TARGET_EINVAL;
2381              }
2382  
2383              if (get_user_u32(val, optval_addr)) {
2384                  return -TARGET_EFAULT;
2385              }
2386              ret = get_errno(setsockopt(sockfd, level, optname,
2387                                         &val, sizeof(val)));
2388              break;
2389  
2390          default:
2391              goto unimplemented;
2392          }
2393          break;
2394  #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2395      case SOL_ALG:
2396          switch (optname) {
2397          case ALG_SET_KEY:
2398          {
2399              char *alg_key = g_malloc(optlen);
2400  
2401              if (!alg_key) {
2402                  return -TARGET_ENOMEM;
2403              }
2404              if (copy_from_user(alg_key, optval_addr, optlen)) {
2405                  g_free(alg_key);
2406                  return -TARGET_EFAULT;
2407              }
2408              ret = get_errno(setsockopt(sockfd, level, optname,
2409                                         alg_key, optlen));
2410              g_free(alg_key);
2411              break;
2412          }
2413          case ALG_SET_AEAD_AUTHSIZE:
2414          {
2415              ret = get_errno(setsockopt(sockfd, level, optname,
2416                                         NULL, optlen));
2417              break;
2418          }
2419          default:
2420              goto unimplemented;
2421          }
2422          break;
2423  #endif
2424      case TARGET_SOL_SOCKET:
2425          switch (optname) {
2426          case TARGET_SO_RCVTIMEO:
2427          {
2428                  struct timeval tv;
2429  
2430                  optname = SO_RCVTIMEO;
2431  
2432  set_timeout:
2433                  if (optlen != sizeof(struct target_timeval)) {
2434                      return -TARGET_EINVAL;
2435                  }
2436  
2437                  if (copy_from_user_timeval(&tv, optval_addr)) {
2438                      return -TARGET_EFAULT;
2439                  }
2440  
2441                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2442                                  &tv, sizeof(tv)));
2443                  return ret;
2444          }
2445          case TARGET_SO_SNDTIMEO:
2446                  optname = SO_SNDTIMEO;
2447                  goto set_timeout;
2448          case TARGET_SO_ATTACH_FILTER:
2449          {
2450                  struct target_sock_fprog *tfprog;
2451                  struct target_sock_filter *tfilter;
2452                  struct sock_fprog fprog;
2453                  struct sock_filter *filter;
2454                  int i;
2455  
2456                  if (optlen != sizeof(*tfprog)) {
2457                      return -TARGET_EINVAL;
2458                  }
2459                  if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2460                      return -TARGET_EFAULT;
2461                  }
2462                  if (!lock_user_struct(VERIFY_READ, tfilter,
2463                                        tswapal(tfprog->filter), 0)) {
2464                      unlock_user_struct(tfprog, optval_addr, 1);
2465                      return -TARGET_EFAULT;
2466                  }
2467  
2468                  fprog.len = tswap16(tfprog->len);
2469                  filter = g_try_new(struct sock_filter, fprog.len);
2470                  if (filter == NULL) {
2471                      unlock_user_struct(tfilter, tfprog->filter, 1);
2472                      unlock_user_struct(tfprog, optval_addr, 1);
2473                      return -TARGET_ENOMEM;
2474                  }
2475                  for (i = 0; i < fprog.len; i++) {
2476                      filter[i].code = tswap16(tfilter[i].code);
2477                      filter[i].jt = tfilter[i].jt;
2478                      filter[i].jf = tfilter[i].jf;
2479                      filter[i].k = tswap32(tfilter[i].k);
2480                  }
2481                  fprog.filter = filter;
2482  
2483                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2484                                  SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2485                  g_free(filter);
2486  
2487                  unlock_user_struct(tfilter, tfprog->filter, 1);
2488                  unlock_user_struct(tfprog, optval_addr, 1);
2489                  return ret;
2490          }
2491          case TARGET_SO_BINDTODEVICE:
2492          {
2493                  char *dev_ifname, *addr_ifname;
2494  
2495                  if (optlen > IFNAMSIZ - 1) {
2496                      optlen = IFNAMSIZ - 1;
2497                  }
2498                  dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2499                  if (!dev_ifname) {
2500                      return -TARGET_EFAULT;
2501                  }
2502                  optname = SO_BINDTODEVICE;
2503                  addr_ifname = alloca(IFNAMSIZ);
2504                  memcpy(addr_ifname, dev_ifname, optlen);
2505                  addr_ifname[optlen] = 0;
2506                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2507                                             addr_ifname, optlen));
2508                  unlock_user(dev_ifname, optval_addr, 0);
2509                  return ret;
2510          }
2511          case TARGET_SO_LINGER:
2512          {
2513                  struct linger lg;
2514                  struct target_linger *tlg;
2515  
2516                  if (optlen != sizeof(struct target_linger)) {
2517                      return -TARGET_EINVAL;
2518                  }
2519                  if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2520                      return -TARGET_EFAULT;
2521                  }
2522                  __get_user(lg.l_onoff, &tlg->l_onoff);
2523                  __get_user(lg.l_linger, &tlg->l_linger);
2524                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2525                                  &lg, sizeof(lg)));
2526                  unlock_user_struct(tlg, optval_addr, 0);
2527                  return ret;
2528          }
2529              /* Options with 'int' argument.  */
2530          case TARGET_SO_DEBUG:
2531                  optname = SO_DEBUG;
2532                  break;
2533          case TARGET_SO_REUSEADDR:
2534                  optname = SO_REUSEADDR;
2535                  break;
2536  #ifdef SO_REUSEPORT
2537          case TARGET_SO_REUSEPORT:
2538                  optname = SO_REUSEPORT;
2539                  break;
2540  #endif
2541          case TARGET_SO_TYPE:
2542                  optname = SO_TYPE;
2543                  break;
2544          case TARGET_SO_ERROR:
2545                  optname = SO_ERROR;
2546                  break;
2547          case TARGET_SO_DONTROUTE:
2548                  optname = SO_DONTROUTE;
2549                  break;
2550          case TARGET_SO_BROADCAST:
2551                  optname = SO_BROADCAST;
2552                  break;
2553          case TARGET_SO_SNDBUF:
2554                  optname = SO_SNDBUF;
2555                  break;
2556          case TARGET_SO_SNDBUFFORCE:
2557                  optname = SO_SNDBUFFORCE;
2558                  break;
2559          case TARGET_SO_RCVBUF:
2560                  optname = SO_RCVBUF;
2561                  break;
2562          case TARGET_SO_RCVBUFFORCE:
2563                  optname = SO_RCVBUFFORCE;
2564                  break;
2565          case TARGET_SO_KEEPALIVE:
2566                  optname = SO_KEEPALIVE;
2567                  break;
2568          case TARGET_SO_OOBINLINE:
2569                  optname = SO_OOBINLINE;
2570                  break;
2571          case TARGET_SO_NO_CHECK:
2572                  optname = SO_NO_CHECK;
2573                  break;
2574          case TARGET_SO_PRIORITY:
2575                  optname = SO_PRIORITY;
2576                  break;
2577  #ifdef SO_BSDCOMPAT
2578          case TARGET_SO_BSDCOMPAT:
2579                  optname = SO_BSDCOMPAT;
2580                  break;
2581  #endif
2582          case TARGET_SO_PASSCRED:
2583                  optname = SO_PASSCRED;
2584                  break;
2585          case TARGET_SO_PASSSEC:
2586                  optname = SO_PASSSEC;
2587                  break;
2588          case TARGET_SO_TIMESTAMP:
2589                  optname = SO_TIMESTAMP;
2590                  break;
2591          case TARGET_SO_RCVLOWAT:
2592                  optname = SO_RCVLOWAT;
2593                  break;
2594          default:
2595              goto unimplemented;
2596          }
2597          if (optlen < sizeof(uint32_t))
2598              return -TARGET_EINVAL;
2599  
2600          if (get_user_u32(val, optval_addr))
2601              return -TARGET_EFAULT;
2602          ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2603          break;
2604  #ifdef SOL_NETLINK
2605      case SOL_NETLINK:
2606          switch (optname) {
2607          case NETLINK_PKTINFO:
2608          case NETLINK_ADD_MEMBERSHIP:
2609          case NETLINK_DROP_MEMBERSHIP:
2610          case NETLINK_BROADCAST_ERROR:
2611          case NETLINK_NO_ENOBUFS:
2612  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613          case NETLINK_LISTEN_ALL_NSID:
2614          case NETLINK_CAP_ACK:
2615  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617          case NETLINK_EXT_ACK:
2618  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620          case NETLINK_GET_STRICT_CHK:
2621  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2622              break;
2623          default:
2624              goto unimplemented;
2625          }
2626          val = 0;
2627          if (optlen < sizeof(uint32_t)) {
2628              return -TARGET_EINVAL;
2629          }
2630          if (get_user_u32(val, optval_addr)) {
2631              return -TARGET_EFAULT;
2632          }
2633          ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2634                                     sizeof(val)));
2635          break;
2636  #endif /* SOL_NETLINK */
2637      default:
2638      unimplemented:
2639          qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2640                        level, optname);
2641          ret = -TARGET_ENOPROTOOPT;
2642      }
2643      return ret;
2644  }
2645  
2646  /* do_getsockopt() must return target values and target errnos. */
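/*
 * Results are converted back to guest representation per level/optname;
 * unknown options are logged and fail with -TARGET_EOPNOTSUPP.
 */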
2647  static abi_long do_getsockopt(int sockfd, int level, int optname,
2648                                abi_ulong optval_addr, abi_ulong optlen)
2649  {
2650      abi_long ret;
2651      int len, val;
2652      socklen_t lv;
2653  
2654      switch(level) {
2655      case TARGET_SOL_SOCKET:
2656          level = SOL_SOCKET;
2657          switch (optname) {
2658          /* These don't just return a single integer */
2659          case TARGET_SO_PEERNAME:
2660              goto unimplemented;
2661          case TARGET_SO_RCVTIMEO: {
2662              struct timeval tv;
2663              socklen_t tvlen;
2664  
2665              optname = SO_RCVTIMEO;
2666  
2667  get_timeout:
2668              if (get_user_u32(len, optlen)) {
2669                  return -TARGET_EFAULT;
2670              }
2671              if (len < 0) {
2672                  return -TARGET_EINVAL;
2673              }
2674  
2675              tvlen = sizeof(tv);
2676              ret = get_errno(getsockopt(sockfd, level, optname,
2677                                         &tv, &tvlen));
2678              if (ret < 0) {
2679                  return ret;
2680              }
2681              if (len > sizeof(struct target_timeval)) {
2682                  len = sizeof(struct target_timeval);
2683              }
2684              if (copy_to_user_timeval(optval_addr, &tv)) {
2685                  return -TARGET_EFAULT;
2686              }
2687              if (put_user_u32(len, optlen)) {
2688                  return -TARGET_EFAULT;
2689              }
2690              break;
2691          }
2692          case TARGET_SO_SNDTIMEO:
2693              optname = SO_SNDTIMEO;
2694              goto get_timeout;
2695          case TARGET_SO_PEERCRED: {
2696              struct ucred cr;
2697              socklen_t crlen;
2698              struct target_ucred *tcr;
2699  
2700              if (get_user_u32(len, optlen)) {
2701                  return -TARGET_EFAULT;
2702              }
2703              if (len < 0) {
2704                  return -TARGET_EINVAL;
2705              }
2706  
2707              crlen = sizeof(cr);
2708              ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2709                                         &cr, &crlen));
2710              if (ret < 0) {
2711                  return ret;
2712              }
2713              if (len > crlen) {
2714                  len = crlen;
2715              }
2716              if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2717                  return -TARGET_EFAULT;
2718              }
2719              __put_user(cr.pid, &tcr->pid);
2720              __put_user(cr.uid, &tcr->uid);
2721              __put_user(cr.gid, &tcr->gid);
2722              unlock_user_struct(tcr, optval_addr, 1);
2723              if (put_user_u32(len, optlen)) {
2724                  return -TARGET_EFAULT;
2725              }
2726              break;
2727          }
2728          case TARGET_SO_PEERSEC: {
2729              char *name;
2730  
2731              if (get_user_u32(len, optlen)) {
2732                  return -TARGET_EFAULT;
2733              }
2734              if (len < 0) {
2735                  return -TARGET_EINVAL;
2736              }
2737              name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2738              if (!name) {
2739                  return -TARGET_EFAULT;
2740              }
2741              lv = len;
2742              ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2743                                         name, &lv));
2744              if (put_user_u32(lv, optlen)) {
2745                  ret = -TARGET_EFAULT;
2746              }
2747              unlock_user(name, optval_addr, lv);
2748              break;
2749          }
2750          case TARGET_SO_LINGER:
2751          {
2752              struct linger lg;
2753              socklen_t lglen;
2754              struct target_linger *tlg;
2755  
2756              if (get_user_u32(len, optlen)) {
2757                  return -TARGET_EFAULT;
2758              }
2759              if (len < 0) {
2760                  return -TARGET_EINVAL;
2761              }
2762  
2763              lglen = sizeof(lg);
2764              ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2765                                         &lg, &lglen));
2766              if (ret < 0) {
2767                  return ret;
2768              }
2769              if (len > lglen) {
2770                  len = lglen;
2771              }
2772              if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2773                  return -TARGET_EFAULT;
2774              }
2775              __put_user(lg.l_onoff, &tlg->l_onoff);
2776              __put_user(lg.l_linger, &tlg->l_linger);
2777              unlock_user_struct(tlg, optval_addr, 1);
2778              if (put_user_u32(len, optlen)) {
2779                  return -TARGET_EFAULT;
2780              }
2781              break;
2782          }
2783          /* Options with 'int' argument.  */
2784          case TARGET_SO_DEBUG:
2785              optname = SO_DEBUG;
2786              goto int_case;
2787          case TARGET_SO_REUSEADDR:
2788              optname = SO_REUSEADDR;
2789              goto int_case;
2790  #ifdef SO_REUSEPORT
2791          case TARGET_SO_REUSEPORT:
2792              optname = SO_REUSEPORT;
2793              goto int_case;
2794  #endif
2795          case TARGET_SO_TYPE:
2796              optname = SO_TYPE;
2797              goto int_case;
2798          case TARGET_SO_ERROR:
2799              optname = SO_ERROR;
2800              goto int_case;
2801          case TARGET_SO_DONTROUTE:
2802              optname = SO_DONTROUTE;
2803              goto int_case;
2804          case TARGET_SO_BROADCAST:
2805              optname = SO_BROADCAST;
2806              goto int_case;
2807          case TARGET_SO_SNDBUF:
2808              optname = SO_SNDBUF;
2809              goto int_case;
2810          case TARGET_SO_RCVBUF:
2811              optname = SO_RCVBUF;
2812              goto int_case;
2813          case TARGET_SO_KEEPALIVE:
2814              optname = SO_KEEPALIVE;
2815              goto int_case;
2816          case TARGET_SO_OOBINLINE:
2817              optname = SO_OOBINLINE;
2818              goto int_case;
2819          case TARGET_SO_NO_CHECK:
2820              optname = SO_NO_CHECK;
2821              goto int_case;
2822          case TARGET_SO_PRIORITY:
2823              optname = SO_PRIORITY;
2824              goto int_case;
2825  #ifdef SO_BSDCOMPAT
2826          case TARGET_SO_BSDCOMPAT:
2827              optname = SO_BSDCOMPAT;
2828              goto int_case;
2829  #endif
2830          case TARGET_SO_PASSCRED:
2831              optname = SO_PASSCRED;
2832              goto int_case;
2833          case TARGET_SO_TIMESTAMP:
2834              optname = SO_TIMESTAMP;
2835              goto int_case;
2836          case TARGET_SO_RCVLOWAT:
2837              optname = SO_RCVLOWAT;
2838              goto int_case;
2839          case TARGET_SO_ACCEPTCONN:
2840              optname = SO_ACCEPTCONN;
2841              goto int_case;
2842          case TARGET_SO_PROTOCOL:
2843              optname = SO_PROTOCOL;
2844              goto int_case;
2845          case TARGET_SO_DOMAIN:
2846              optname = SO_DOMAIN;
2847              goto int_case;
2848          default:
2849              goto int_case;
2850          }
2851          break;
2852      case SOL_TCP:
2853      case SOL_UDP:
2854          /* TCP and UDP options all take an 'int' value.  */
2855      int_case:
2856          if (get_user_u32(len, optlen))
2857              return -TARGET_EFAULT;
2858          if (len < 0)
2859              return -TARGET_EINVAL;
2860          lv = sizeof(lv);
2861          ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2862          if (ret < 0)
2863              return ret;
2864          if (optname == SO_TYPE) {
2865              val = host_to_target_sock_type(val);
2866          }
2867          if (len > lv)
2868              len = lv;
2869          if (len == 4) {
2870              if (put_user_u32(val, optval_addr))
2871                  return -TARGET_EFAULT;
2872          } else {
2873              if (put_user_u8(val, optval_addr))
2874                  return -TARGET_EFAULT;
2875          }
2876          if (put_user_u32(len, optlen))
2877              return -TARGET_EFAULT;
2878          break;
2879      case SOL_IP:
2880          switch(optname) {
2881          case IP_TOS:
2882          case IP_TTL:
2883          case IP_HDRINCL:
2884          case IP_ROUTER_ALERT:
2885          case IP_RECVOPTS:
2886          case IP_RETOPTS:
2887          case IP_PKTINFO:
2888          case IP_MTU_DISCOVER:
2889          case IP_RECVERR:
2890          case IP_RECVTOS:
2891  #ifdef IP_FREEBIND
2892          case IP_FREEBIND:
2893  #endif
2894          case IP_MULTICAST_TTL:
2895          case IP_MULTICAST_LOOP:
2896              if (get_user_u32(len, optlen))
2897                  return -TARGET_EFAULT;
2898              if (len < 0)
2899                  return -TARGET_EINVAL;
2900              lv = sizeof(lv);
2901              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2902              if (ret < 0)
2903                  return ret;
2904              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2905                  len = 1;
2906                  if (put_user_u32(len, optlen)
2907                      || put_user_u8(val, optval_addr))
2908                      return -TARGET_EFAULT;
2909              } else {
2910                  if (len > sizeof(int))
2911                      len = sizeof(int);
2912                  if (put_user_u32(len, optlen)
2913                      || put_user_u32(val, optval_addr))
2914                      return -TARGET_EFAULT;
2915              }
2916              break;
2917          default:
2918              ret = -TARGET_ENOPROTOOPT;
2919              break;
2920          }
2921          break;
2922      case SOL_IPV6:
2923          switch (optname) {
2924          case IPV6_MTU_DISCOVER:
2925          case IPV6_MTU:
2926          case IPV6_V6ONLY:
2927          case IPV6_RECVPKTINFO:
2928          case IPV6_UNICAST_HOPS:
2929          case IPV6_MULTICAST_HOPS:
2930          case IPV6_MULTICAST_LOOP:
2931          case IPV6_RECVERR:
2932          case IPV6_RECVHOPLIMIT:
2933          case IPV6_2292HOPLIMIT:
2934          case IPV6_CHECKSUM:
2935          case IPV6_ADDRFORM:
2936          case IPV6_2292PKTINFO:
2937          case IPV6_RECVTCLASS:
2938          case IPV6_RECVRTHDR:
2939          case IPV6_2292RTHDR:
2940          case IPV6_RECVHOPOPTS:
2941          case IPV6_2292HOPOPTS:
2942          case IPV6_RECVDSTOPTS:
2943          case IPV6_2292DSTOPTS:
2944          case IPV6_TCLASS:
2945          case IPV6_ADDR_PREFERENCES:
2946  #ifdef IPV6_RECVPATHMTU
2947          case IPV6_RECVPATHMTU:
2948  #endif
2949  #ifdef IPV6_TRANSPARENT
2950          case IPV6_TRANSPARENT:
2951  #endif
2952  #ifdef IPV6_FREEBIND
2953          case IPV6_FREEBIND:
2954  #endif
2955  #ifdef IPV6_RECVORIGDSTADDR
2956          case IPV6_RECVORIGDSTADDR:
2957  #endif
2958              if (get_user_u32(len, optlen))
2959                  return -TARGET_EFAULT;
2960              if (len < 0)
2961                  return -TARGET_EINVAL;
2962              lv = sizeof(lv);
2963              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2964              if (ret < 0)
2965                  return ret;
2966              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2967                  len = 1;
2968                  if (put_user_u32(len, optlen)
2969                      || put_user_u8(val, optval_addr))
2970                      return -TARGET_EFAULT;
2971              } else {
2972                  if (len > sizeof(int))
2973                      len = sizeof(int);
2974                  if (put_user_u32(len, optlen)
2975                      || put_user_u32(val, optval_addr))
2976                      return -TARGET_EFAULT;
2977              }
2978              break;
2979          default:
2980              ret = -TARGET_ENOPROTOOPT;
2981              break;
2982          }
2983          break;
2984  #ifdef SOL_NETLINK
2985      case SOL_NETLINK:
2986          switch (optname) {
2987          case NETLINK_PKTINFO:
2988          case NETLINK_BROADCAST_ERROR:
2989          case NETLINK_NO_ENOBUFS:
2990  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991          case NETLINK_LISTEN_ALL_NSID:
2992          case NETLINK_CAP_ACK:
2993  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995          case NETLINK_EXT_ACK:
2996  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998          case NETLINK_GET_STRICT_CHK:
2999  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
3000              if (get_user_u32(len, optlen)) {
3001                  return -TARGET_EFAULT;
3002              }
3003              if (len != sizeof(val)) {
3004                  return -TARGET_EINVAL;
3005              }
3006              lv = len;
3007              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3008              if (ret < 0) {
3009                  return ret;
3010              }
3011              if (put_user_u32(lv, optlen)
3012                  || put_user_u32(val, optval_addr)) {
3013                  return -TARGET_EFAULT;
3014              }
3015              break;
3016  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017          case NETLINK_LIST_MEMBERSHIPS:
3018          {
3019              uint32_t *results;
3020              int i;
3021              if (get_user_u32(len, optlen)) {
3022                  return -TARGET_EFAULT;
3023              }
3024              if (len < 0) {
3025                  return -TARGET_EINVAL;
3026              }
3027              results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3028              if (!results && len > 0) {
3029                  return -TARGET_EFAULT;
3030              }
3031              lv = len;
3032              ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3033              if (ret < 0) {
3034                  unlock_user(results, optval_addr, 0);
3035                  return ret;
3036              }
3037              /* swap host endianness to target endianness. */
3038              for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3039                  results[i] = tswap32(results[i]);
3040              }
3041              unlock_user(results, optval_addr, 0);
3042              if (put_user_u32(lv, optlen)) {
3043                  return -TARGET_EFAULT;
3044              }
3045              break;
3046          }
3047  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3048          default:
3049              goto unimplemented;
3050          }
3051          break;
3052  #endif /* SOL_NETLINK */
3053      default:
3054      unimplemented:
3055          qemu_log_mask(LOG_UNIMP,
3056                        "getsockopt level=%d optname=%d not yet supported\n",
3057                        level, optname);
3058          ret = -TARGET_EOPNOTSUPP;
3059          break;
3060      }
3061      return ret;
3062  }
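/*
 * Illustrative note (added, not part of the original source): for the
 * integer-valued SOL_IP and SOL_IPV6 options handled above, a guest that
 * passes optlen < sizeof(int) gets at most one byte back.  For example
 * (hypothetical values), a guest reading IP_TTL with optlen = 1 while the
 * host value is 64 receives the single byte 64 and optlen is rewritten to
 * 1; with optlen = 4 it receives the full 32-bit value.  This apparently
 * mirrors the kernel's own behaviour for these options.
 */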
3063  
3064  /* Convert target low/high pair representing file offset into the host
3065   * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066   * as the kernel doesn't handle them either.
3067   */
3068  static void target_to_host_low_high(abi_ulong tlow,
3069                                      abi_ulong thigh,
3070                                      unsigned long *hlow,
3071                                      unsigned long *hhigh)
3072  {
3073      uint64_t off = tlow |
3074          ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3075          TARGET_LONG_BITS / 2;
3076  
3077      *hlow = off;
3078      *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3079  }
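/*
 * Worked example (illustrative, not part of the original source): with a
 * 32-bit target, tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef.  A 64-bit host then gets hlow = off and
 * hhigh = 0, while a 32-bit host gets hlow = 0x89abcdef and
 * hhigh = 0x01234567.  The shifts are split in two so that a value is
 * never shifted by its full width, which would be undefined behaviour in C
 * when TARGET_LONG_BITS or HOST_LONG_BITS is 64.
 */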
3080  
3081  static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3082                                  abi_ulong count, int copy)
3083  {
3084      struct target_iovec *target_vec;
3085      struct iovec *vec;
3086      abi_ulong total_len, max_len;
3087      int i;
3088      int err = 0;
3089      bool bad_address = false;
3090  
3091      if (count == 0) {
3092          errno = 0;
3093          return NULL;
3094      }
3095      if (count > IOV_MAX) {
3096          errno = EINVAL;
3097          return NULL;
3098      }
3099  
3100      vec = g_try_new0(struct iovec, count);
3101      if (vec == NULL) {
3102          errno = ENOMEM;
3103          return NULL;
3104      }
3105  
3106      target_vec = lock_user(VERIFY_READ, target_addr,
3107                             count * sizeof(struct target_iovec), 1);
3108      if (target_vec == NULL) {
3109          err = EFAULT;
3110          goto fail2;
3111      }
3112  
3113      /* ??? If host page size > target page size, this will result in a
3114         value larger than what we can actually support.  */
3115      max_len = 0x7fffffff & TARGET_PAGE_MASK;
3116      total_len = 0;
3117  
3118      for (i = 0; i < count; i++) {
3119          abi_ulong base = tswapal(target_vec[i].iov_base);
3120          abi_long len = tswapal(target_vec[i].iov_len);
3121  
3122          if (len < 0) {
3123              err = EINVAL;
3124              goto fail;
3125          } else if (len == 0) {
3126              /* Zero length pointer is ignored.  */
3127              vec[i].iov_base = 0;
3128          } else {
3129              vec[i].iov_base = lock_user(type, base, len, copy);
3130              /* If the first buffer pointer is bad, this is a fault.  But
3131               * subsequent bad buffers will result in a partial write; this
3132               * is realized by filling the vector with null pointers and
3133               * zero lengths. */
3134              if (!vec[i].iov_base) {
3135                  if (i == 0) {
3136                      err = EFAULT;
3137                      goto fail;
3138                  } else {
3139                      bad_address = true;
3140                  }
3141              }
3142              if (bad_address) {
3143                  len = 0;
3144              }
3145              if (len > max_len - total_len) {
3146                  len = max_len - total_len;
3147              }
3148          }
3149          vec[i].iov_len = len;
3150          total_len += len;
3151      }
3152  
3153      unlock_user(target_vec, target_addr, 0);
3154      return vec;
3155  
3156   fail:
3157      while (--i >= 0) {
3158          if (tswapal(target_vec[i].iov_len) > 0) {
3159              unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3160          }
3161      }
3162      unlock_user(target_vec, target_addr, 0);
3163   fail2:
3164      g_free(vec);
3165      errno = err;
3166      return NULL;
3167  }
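/*
 * Illustrative sketch (assumption, not part of the original source): for a
 * guest writev() with three buffers where only the second one is unmapped,
 * the loop above keeps buffer 0 intact and forces buffers 1 and 2 to zero
 * length (buffer 1 additionally gets a NULL base), so the host syscall
 * performs a partial write instead of failing.  Only a bad first buffer
 * makes the whole call fail with EFAULT.
 */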
3168  
3169  static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3170                           abi_ulong count, int copy)
3171  {
3172      struct target_iovec *target_vec;
3173      int i;
3174  
3175      target_vec = lock_user(VERIFY_READ, target_addr,
3176                             count * sizeof(struct target_iovec), 1);
3177      if (target_vec) {
3178          for (i = 0; i < count; i++) {
3179              abi_ulong base = tswapal(target_vec[i].iov_base);
3180              abi_long len = tswapal(target_vec[i].iov_len);
3181              if (len < 0) {
3182                  break;
3183              }
3184              unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3185          }
3186          unlock_user(target_vec, target_addr, 0);
3187      }
3188  
3189      g_free(vec);
3190  }
3191  
3192  static inline int target_to_host_sock_type(int *type)
3193  {
3194      int host_type = 0;
3195      int target_type = *type;
3196  
3197      switch (target_type & TARGET_SOCK_TYPE_MASK) {
3198      case TARGET_SOCK_DGRAM:
3199          host_type = SOCK_DGRAM;
3200          break;
3201      case TARGET_SOCK_STREAM:
3202          host_type = SOCK_STREAM;
3203          break;
3204      default:
3205          host_type = target_type & TARGET_SOCK_TYPE_MASK;
3206          break;
3207      }
3208      if (target_type & TARGET_SOCK_CLOEXEC) {
3209  #if defined(SOCK_CLOEXEC)
3210          host_type |= SOCK_CLOEXEC;
3211  #else
3212          return -TARGET_EINVAL;
3213  #endif
3214      }
3215      if (target_type & TARGET_SOCK_NONBLOCK) {
3216  #if defined(SOCK_NONBLOCK)
3217          host_type |= SOCK_NONBLOCK;
3218  #elif !defined(O_NONBLOCK)
3219          return -TARGET_EINVAL;
3220  #endif
3221      }
3222      *type = host_type;
3223      return 0;
3224  }
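/*
 * Example (illustrative, not part of the original source): a guest type of
 * TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC maps to SOCK_STREAM |
 * SOCK_CLOEXEC on hosts that define SOCK_CLOEXEC.  TARGET_SOCK_NONBLOCK on
 * a host without SOCK_NONBLOCK but with O_NONBLOCK is accepted here and
 * emulated later by sock_flags_fixup() below via fcntl(F_SETFL, O_NONBLOCK).
 */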
3225  
3226  /* Try to emulate socket type flags after socket creation.  */
3227  static int sock_flags_fixup(int fd, int target_type)
3228  {
3229  #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230      if (target_type & TARGET_SOCK_NONBLOCK) {
3231          int flags = fcntl(fd, F_GETFL);
3232          if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3233              close(fd);
3234              return -TARGET_EINVAL;
3235          }
3236      }
3237  #endif
3238      return fd;
3239  }
3240  
3241  /* do_socket() Must return target values and target errnos. */
3242  static abi_long do_socket(int domain, int type, int protocol)
3243  {
3244      int target_type = type;
3245      int ret;
3246  
3247      ret = target_to_host_sock_type(&type);
3248      if (ret) {
3249          return ret;
3250      }
3251  
3252      if (domain == PF_NETLINK && !(
3253  #ifdef CONFIG_RTNETLINK
3254           protocol == NETLINK_ROUTE ||
3255  #endif
3256           protocol == NETLINK_KOBJECT_UEVENT ||
3257           protocol == NETLINK_AUDIT)) {
3258          return -TARGET_EPROTONOSUPPORT;
3259      }
3260  
3261      if (domain == AF_PACKET ||
3262          (domain == AF_INET && type == SOCK_PACKET)) {
3263          protocol = tswap16(protocol);
3264      }
3265  
3266      ret = get_errno(socket(domain, type, protocol));
3267      if (ret >= 0) {
3268          ret = sock_flags_fixup(ret, target_type);
3269          if (type == SOCK_PACKET) {
3270              /* Handle an obsolete case:
3271               * if the socket type is SOCK_PACKET, bind by name.
3272               */
3273              fd_trans_register(ret, &target_packet_trans);
3274          } else if (domain == PF_NETLINK) {
3275              switch (protocol) {
3276  #ifdef CONFIG_RTNETLINK
3277              case NETLINK_ROUTE:
3278                  fd_trans_register(ret, &target_netlink_route_trans);
3279                  break;
3280  #endif
3281              case NETLINK_KOBJECT_UEVENT:
3282                  /* nothing to do: messages are strings */
3283                  break;
3284              case NETLINK_AUDIT:
3285                  fd_trans_register(ret, &target_netlink_audit_trans);
3286                  break;
3287              default:
3288                  g_assert_not_reached();
3289              }
3290          }
3291      }
3292      return ret;
3293  }
3294  
3295  /* do_bind() Must return target values and target errnos. */
3296  static abi_long do_bind(int sockfd, abi_ulong target_addr,
3297                          socklen_t addrlen)
3298  {
3299      void *addr;
3300      abi_long ret;
3301  
3302      if ((int)addrlen < 0) {
3303          return -TARGET_EINVAL;
3304      }
3305  
3306      addr = alloca(addrlen+1);
3307  
3308      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309      if (ret)
3310          return ret;
3311  
3312      return get_errno(bind(sockfd, addr, addrlen));
3313  }
3314  
3315  /* do_connect() Must return target values and target errnos. */
3316  static abi_long do_connect(int sockfd, abi_ulong target_addr,
3317                             socklen_t addrlen)
3318  {
3319      void *addr;
3320      abi_long ret;
3321  
3322      if ((int)addrlen < 0) {
3323          return -TARGET_EINVAL;
3324      }
3325  
3326      addr = alloca(addrlen+1);
3327  
3328      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3329      if (ret)
3330          return ret;
3331  
3332      return get_errno(safe_connect(sockfd, addr, addrlen));
3333  }
3334  
3335  /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3336  static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3337                                        int flags, int send)
3338  {
3339      abi_long ret, len;
3340      struct msghdr msg;
3341      abi_ulong count;
3342      struct iovec *vec;
3343      abi_ulong target_vec;
3344  
3345      if (msgp->msg_name) {
3346          msg.msg_namelen = tswap32(msgp->msg_namelen);
3347          msg.msg_name = alloca(msg.msg_namelen+1);
3348          ret = target_to_host_sockaddr(fd, msg.msg_name,
3349                                        tswapal(msgp->msg_name),
3350                                        msg.msg_namelen);
3351          if (ret == -TARGET_EFAULT) {
3352              /* For connected sockets msg_name and msg_namelen must
3353               * be ignored, so returning EFAULT immediately is wrong.
3354               * Instead, pass a bad msg_name to the host kernel, and
3355               * let it decide whether to return EFAULT or not.
3356               */
3357              msg.msg_name = (void *)-1;
3358          } else if (ret) {
3359              goto out2;
3360          }
3361      } else {
3362          msg.msg_name = NULL;
3363          msg.msg_namelen = 0;
3364      }
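    /*
     * Added note (assumption, not in the original source): the host control
     * buffer is sized at twice the target's msg_controllen, presumably
     * because host cmsg structures can be wider than the target's (e.g. a
     * 64-bit host servicing a 32-bit target), so the converted ancillary
     * data may need the extra room.
     */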
3365      msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3366      msg.msg_control = alloca(msg.msg_controllen);
3367      memset(msg.msg_control, 0, msg.msg_controllen);
3368  
3369      msg.msg_flags = tswap32(msgp->msg_flags);
3370  
3371      count = tswapal(msgp->msg_iovlen);
3372      target_vec = tswapal(msgp->msg_iov);
3373  
3374      if (count > IOV_MAX) {
3375          /* sendmsg and recvmsg return a different errno for this condition than
3376           * readv/writev, so we must catch it here before lock_iovec() does.
3377           */
3378          ret = -TARGET_EMSGSIZE;
3379          goto out2;
3380      }
3381  
3382      vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3383                       target_vec, count, send);
3384      if (vec == NULL) {
3385          ret = -host_to_target_errno(errno);
3386          goto out2;
3387      }
3388      msg.msg_iovlen = count;
3389      msg.msg_iov = vec;
3390  
3391      if (send) {
3392          if (fd_trans_target_to_host_data(fd)) {
3393              void *host_msg;
3394  
3395              host_msg = g_malloc(msg.msg_iov->iov_len);
3396              memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3397              ret = fd_trans_target_to_host_data(fd)(host_msg,
3398                                                     msg.msg_iov->iov_len);
3399              if (ret >= 0) {
3400                  msg.msg_iov->iov_base = host_msg;
3401                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3402              }
3403              g_free(host_msg);
3404          } else {
3405              ret = target_to_host_cmsg(&msg, msgp);
3406              if (ret == 0) {
3407                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3408              }
3409          }
3410      } else {
3411          ret = get_errno(safe_recvmsg(fd, &msg, flags));
3412          if (!is_error(ret)) {
3413              len = ret;
3414              if (fd_trans_host_to_target_data(fd)) {
3415                  ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3416                                                 MIN(msg.msg_iov->iov_len, len));
3417              } else {
3418                  ret = host_to_target_cmsg(msgp, &msg);
3419              }
3420              if (!is_error(ret)) {
3421                  msgp->msg_namelen = tswap32(msg.msg_namelen);
3422                  msgp->msg_flags = tswap32(msg.msg_flags);
3423                  if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3424                      ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3425                                      msg.msg_name, msg.msg_namelen);
3426                      if (ret) {
3427                          goto out;
3428                      }
3429                  }
3430  
3431                  ret = len;
3432              }
3433          }
3434      }
3435  
3436  out:
3437      unlock_iovec(vec, target_vec, count, !send);
3438  out2:
3439      return ret;
3440  }
3441  
3442  static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3443                                 int flags, int send)
3444  {
3445      abi_long ret;
3446      struct target_msghdr *msgp;
3447  
3448      if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3449                            msgp,
3450                            target_msg,
3451                            send ? 1 : 0)) {
3452          return -TARGET_EFAULT;
3453      }
3454      ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3455      unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3456      return ret;
3457  }
3458  
3459  /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460   * so it might not have this *mmsg-specific flag either.
3461   */
3462  #ifndef MSG_WAITFORONE
3463  #define MSG_WAITFORONE 0x10000
3464  #endif
3465  
3466  static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3467                                  unsigned int vlen, unsigned int flags,
3468                                  int send)
3469  {
3470      struct target_mmsghdr *mmsgp;
3471      abi_long ret = 0;
3472      int i;
3473  
3474      if (vlen > UIO_MAXIOV) {
3475          vlen = UIO_MAXIOV;
3476      }
3477  
3478      mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3479      if (!mmsgp) {
3480          return -TARGET_EFAULT;
3481      }
3482  
3483      for (i = 0; i < vlen; i++) {
3484          ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3485          if (is_error(ret)) {
3486              break;
3487          }
3488          mmsgp[i].msg_len = tswap32(ret);
3489          /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490          if (flags & MSG_WAITFORONE) {
3491              flags |= MSG_DONTWAIT;
3492          }
3493      }
3494  
3495      unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3496  
3497      /* Return number of datagrams sent if we sent any at all;
3498       * otherwise return the error.
3499       */
3500      if (i) {
3501          return i;
3502      }
3503      return ret;
3504  }
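/*
 * Example (illustrative, not part of the original source): if a guest
 * sendmmsg() of four datagrams fails on the third one, the loop above
 * breaks with i == 2 and the function returns 2, i.e. the number of
 * datagrams already sent; the error itself is only propagated when not
 * even the first datagram went through.
 */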
3505  
3506  /* do_accept4() Must return target values and target errnos. */
3507  static abi_long do_accept4(int fd, abi_ulong target_addr,
3508                             abi_ulong target_addrlen_addr, int flags)
3509  {
3510      socklen_t addrlen, ret_addrlen;
3511      void *addr;
3512      abi_long ret;
3513      int host_flags;
3514  
3515      host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3516  
3517      if (target_addr == 0) {
3518          return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3519      }
3520  
3521      /* Linux returns EFAULT if the addrlen pointer is invalid */
3522      if (get_user_u32(addrlen, target_addrlen_addr))
3523          return -TARGET_EFAULT;
3524  
3525      if ((int)addrlen < 0) {
3526          return -TARGET_EINVAL;
3527      }
3528  
3529      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3530          return -TARGET_EFAULT;
3531      }
3532  
3533      addr = alloca(addrlen);
3534  
3535      ret_addrlen = addrlen;
3536      ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3537      if (!is_error(ret)) {
3538          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3539          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3540              ret = -TARGET_EFAULT;
3541          }
3542      }
3543      return ret;
3544  }
3545  
3546  /* do_getpeername() Must return target values and target errnos. */
3547  static abi_long do_getpeername(int fd, abi_ulong target_addr,
3548                                 abi_ulong target_addrlen_addr)
3549  {
3550      socklen_t addrlen, ret_addrlen;
3551      void *addr;
3552      abi_long ret;
3553  
3554      if (get_user_u32(addrlen, target_addrlen_addr))
3555          return -TARGET_EFAULT;
3556  
3557      if ((int)addrlen < 0) {
3558          return -TARGET_EINVAL;
3559      }
3560  
3561      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3562          return -TARGET_EFAULT;
3563      }
3564  
3565      addr = alloca(addrlen);
3566  
3567      ret_addrlen = addrlen;
3568      ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3569      if (!is_error(ret)) {
3570          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3571          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3572              ret = -TARGET_EFAULT;
3573          }
3574      }
3575      return ret;
3576  }
3577  
3578  /* do_getsockname() Must return target values and target errnos. */
3579  static abi_long do_getsockname(int fd, abi_ulong target_addr,
3580                                 abi_ulong target_addrlen_addr)
3581  {
3582      socklen_t addrlen, ret_addrlen;
3583      void *addr;
3584      abi_long ret;
3585  
3586      if (get_user_u32(addrlen, target_addrlen_addr))
3587          return -TARGET_EFAULT;
3588  
3589      if ((int)addrlen < 0) {
3590          return -TARGET_EINVAL;
3591      }
3592  
3593      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3594          return -TARGET_EFAULT;
3595      }
3596  
3597      addr = alloca(addrlen);
3598  
3599      ret_addrlen = addrlen;
3600      ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3601      if (!is_error(ret)) {
3602          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3603          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3604              ret = -TARGET_EFAULT;
3605          }
3606      }
3607      return ret;
3608  }
3609  
3610  /* do_socketpair() Must return target values and target errnos. */
3611  static abi_long do_socketpair(int domain, int type, int protocol,
3612                                abi_ulong target_tab_addr)
3613  {
3614      int tab[2];
3615      abi_long ret;
3616  
3617      target_to_host_sock_type(&type);
3618  
3619      ret = get_errno(socketpair(domain, type, protocol, tab));
3620      if (!is_error(ret)) {
3621          if (put_user_s32(tab[0], target_tab_addr)
3622              || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3623              ret = -TARGET_EFAULT;
3624      }
3625      return ret;
3626  }
3627  
3628  /* do_sendto() Must return target values and target errnos. */
3629  static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3630                            abi_ulong target_addr, socklen_t addrlen)
3631  {
3632      void *addr;
3633      void *host_msg;
3634      void *copy_msg = NULL;
3635      abi_long ret;
3636  
3637      if ((int)addrlen < 0) {
3638          return -TARGET_EINVAL;
3639      }
3640  
3641      host_msg = lock_user(VERIFY_READ, msg, len, 1);
3642      if (!host_msg)
3643          return -TARGET_EFAULT;
3644      if (fd_trans_target_to_host_data(fd)) {
3645          copy_msg = host_msg;
3646          host_msg = g_malloc(len);
3647          memcpy(host_msg, copy_msg, len);
3648          ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3649          if (ret < 0) {
3650              goto fail;
3651          }
3652      }
3653      if (target_addr) {
3654          addr = alloca(addrlen+1);
3655          ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3656          if (ret) {
3657              goto fail;
3658          }
3659          ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3660      } else {
3661          ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3662      }
3663  fail:
3664      if (copy_msg) {
3665          g_free(host_msg);
3666          host_msg = copy_msg;
3667      }
3668      unlock_user(host_msg, msg, 0);
3669      return ret;
3670  }
3671  
3672  /* do_recvfrom() Must return target values and target errnos. */
3673  static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3674                              abi_ulong target_addr,
3675                              abi_ulong target_addrlen)
3676  {
3677      socklen_t addrlen, ret_addrlen;
3678      void *addr;
3679      void *host_msg;
3680      abi_long ret;
3681  
3682      if (!msg) {
3683          host_msg = NULL;
3684      } else {
3685          host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3686          if (!host_msg) {
3687              return -TARGET_EFAULT;
3688          }
3689      }
3690      if (target_addr) {
3691          if (get_user_u32(addrlen, target_addrlen)) {
3692              ret = -TARGET_EFAULT;
3693              goto fail;
3694          }
3695          if ((int)addrlen < 0) {
3696              ret = -TARGET_EINVAL;
3697              goto fail;
3698          }
3699          addr = alloca(addrlen);
3700          ret_addrlen = addrlen;
3701          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3702                                        addr, &ret_addrlen));
3703      } else {
3704          addr = NULL; /* To keep compiler quiet.  */
3705          addrlen = 0; /* To keep compiler quiet.  */
3706          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3707      }
3708      if (!is_error(ret)) {
3709          if (fd_trans_host_to_target_data(fd)) {
3710              abi_long trans;
3711              trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3712              if (is_error(trans)) {
3713                  ret = trans;
3714                  goto fail;
3715              }
3716          }
3717          if (target_addr) {
3718              host_to_target_sockaddr(target_addr, addr,
3719                                      MIN(addrlen, ret_addrlen));
3720              if (put_user_u32(ret_addrlen, target_addrlen)) {
3721                  ret = -TARGET_EFAULT;
3722                  goto fail;
3723              }
3724          }
3725          unlock_user(host_msg, msg, len);
3726      } else {
3727  fail:
3728          unlock_user(host_msg, msg, 0);
3729      }
3730      return ret;
3731  }
3732  
3733  #ifdef TARGET_NR_socketcall
3734  /* do_socketcall() must return target values and target errnos. */
3735  static abi_long do_socketcall(int num, abi_ulong vptr)
3736  {
3737      static const unsigned nargs[] = { /* number of arguments per operation */
3738          [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3739          [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3740          [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3741          [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3742          [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3743          [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3744          [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3745          [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3746          [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3747          [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3748          [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3749          [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3750          [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3751          [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3752          [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3753          [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3754          [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3755          [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3756          [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3757          [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3758      };
3759      abi_long a[6]; /* max 6 args */
3760      unsigned i;
3761  
3762      /* check the range of the first argument num */
3763      /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3764      if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3765          return -TARGET_EINVAL;
3766      }
3767      /* ensure we have space for args */
3768      if (nargs[num] > ARRAY_SIZE(a)) {
3769          return -TARGET_EINVAL;
3770      }
3771      /* collect the arguments in a[] according to nargs[] */
3772      for (i = 0; i < nargs[num]; ++i) {
3773          if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3774              return -TARGET_EFAULT;
3775          }
3776      }
3777      /* now that we have the args, invoke the appropriate underlying function */
3778      switch (num) {
3779      case TARGET_SYS_SOCKET: /* domain, type, protocol */
3780          return do_socket(a[0], a[1], a[2]);
3781      case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3782          return do_bind(a[0], a[1], a[2]);
3783      case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3784          return do_connect(a[0], a[1], a[2]);
3785      case TARGET_SYS_LISTEN: /* sockfd, backlog */
3786          return get_errno(listen(a[0], a[1]));
3787      case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3788          return do_accept4(a[0], a[1], a[2], 0);
3789      case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3790          return do_getsockname(a[0], a[1], a[2]);
3791      case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3792          return do_getpeername(a[0], a[1], a[2]);
3793      case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3794          return do_socketpair(a[0], a[1], a[2], a[3]);
3795      case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3796          return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3797      case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3798          return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3799      case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3800          return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3801      case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3802          return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3803      case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3804          return get_errno(shutdown(a[0], a[1]));
3805      case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3806          return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3807      case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3808          return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3809      case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3810          return do_sendrecvmsg(a[0], a[1], a[2], 1);
3811      case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3812          return do_sendrecvmsg(a[0], a[1], a[2], 0);
3813      case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3814          return do_accept4(a[0], a[1], a[2], a[3]);
3815      case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3816          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3817      case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3818          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3819      default:
3820          qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3821          return -TARGET_EINVAL;
3822      }
3823  }
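/*
 * Illustrative example (hypothetical guest values, not part of the original
 * source): a 32-bit guest issuing socketcall(TARGET_SYS_SENDTO, args), with
 * args pointing at six abi_longs {fd, buf, len, flags, addr, addrlen}, has
 * each argument fetched with get_user_ual() from the guest argument block
 * and ends up in do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]) above.
 */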
3824  #endif
3825  
3826  #define N_SHM_REGIONS	32
3827  
3828  static struct shm_region {
3829      abi_ulong start;
3830      abi_ulong size;
3831      bool in_use;
3832  } shm_regions[N_SHM_REGIONS];
3833  
3834  #ifndef TARGET_SEMID64_DS
3835  /* asm-generic version of this struct */
3836  struct target_semid64_ds
3837  {
3838    struct target_ipc_perm sem_perm;
3839    abi_ulong sem_otime;
3840  #if TARGET_ABI_BITS == 32
3841    abi_ulong __unused1;
3842  #endif
3843    abi_ulong sem_ctime;
3844  #if TARGET_ABI_BITS == 32
3845    abi_ulong __unused2;
3846  #endif
3847    abi_ulong sem_nsems;
3848    abi_ulong __unused3;
3849    abi_ulong __unused4;
3850  };
3851  #endif
3852  
3853  static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3854                                                 abi_ulong target_addr)
3855  {
3856      struct target_ipc_perm *target_ip;
3857      struct target_semid64_ds *target_sd;
3858  
3859      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3860          return -TARGET_EFAULT;
3861      target_ip = &(target_sd->sem_perm);
3862      host_ip->__key = tswap32(target_ip->__key);
3863      host_ip->uid = tswap32(target_ip->uid);
3864      host_ip->gid = tswap32(target_ip->gid);
3865      host_ip->cuid = tswap32(target_ip->cuid);
3866      host_ip->cgid = tswap32(target_ip->cgid);
3867  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868      host_ip->mode = tswap32(target_ip->mode);
3869  #else
3870      host_ip->mode = tswap16(target_ip->mode);
3871  #endif
3872  #if defined(TARGET_PPC)
3873      host_ip->__seq = tswap32(target_ip->__seq);
3874  #else
3875      host_ip->__seq = tswap16(target_ip->__seq);
3876  #endif
3877      unlock_user_struct(target_sd, target_addr, 0);
3878      return 0;
3879  }
3880  
3881  static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3882                                                 struct ipc_perm *host_ip)
3883  {
3884      struct target_ipc_perm *target_ip;
3885      struct target_semid64_ds *target_sd;
3886  
3887      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3888          return -TARGET_EFAULT;
3889      target_ip = &(target_sd->sem_perm);
3890      target_ip->__key = tswap32(host_ip->__key);
3891      target_ip->uid = tswap32(host_ip->uid);
3892      target_ip->gid = tswap32(host_ip->gid);
3893      target_ip->cuid = tswap32(host_ip->cuid);
3894      target_ip->cgid = tswap32(host_ip->cgid);
3895  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3896      target_ip->mode = tswap32(host_ip->mode);
3897  #else
3898      target_ip->mode = tswap16(host_ip->mode);
3899  #endif
3900  #if defined(TARGET_PPC)
3901      target_ip->__seq = tswap32(host_ip->__seq);
3902  #else
3903      target_ip->__seq = tswap16(host_ip->__seq);
3904  #endif
3905      unlock_user_struct(target_sd, target_addr, 1);
3906      return 0;
3907  }
3908  
3909  static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3910                                                 abi_ulong target_addr)
3911  {
3912      struct target_semid64_ds *target_sd;
3913  
3914      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3915          return -TARGET_EFAULT;
3916      if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3917          return -TARGET_EFAULT;
3918      host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3919      host_sd->sem_otime = tswapal(target_sd->sem_otime);
3920      host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3921      unlock_user_struct(target_sd, target_addr, 0);
3922      return 0;
3923  }
3924  
3925  static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3926                                                 struct semid_ds *host_sd)
3927  {
3928      struct target_semid64_ds *target_sd;
3929  
3930      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3931          return -TARGET_EFAULT;
3932      if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3933          return -TARGET_EFAULT;
3934      target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3935      target_sd->sem_otime = tswapal(host_sd->sem_otime);
3936      target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3937      unlock_user_struct(target_sd, target_addr, 1);
3938      return 0;
3939  }
3940  
3941  struct target_seminfo {
3942      int semmap;
3943      int semmni;
3944      int semmns;
3945      int semmnu;
3946      int semmsl;
3947      int semopm;
3948      int semume;
3949      int semusz;
3950      int semvmx;
3951      int semaem;
3952  };
3953  
3954  static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3955                                                struct seminfo *host_seminfo)
3956  {
3957      struct target_seminfo *target_seminfo;
3958      if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3959          return -TARGET_EFAULT;
3960      __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3961      __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3962      __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3963      __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3964      __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3965      __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3966      __put_user(host_seminfo->semume, &target_seminfo->semume);
3967      __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3968      __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3969      __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3970      unlock_user_struct(target_seminfo, target_addr, 1);
3971      return 0;
3972  }
3973  
3974  union semun {
3975  	int val;
3976  	struct semid_ds *buf;
3977  	unsigned short *array;
3978  	struct seminfo *__buf;
3979  };
3980  
3981  union target_semun {
3982  	int val;
3983  	abi_ulong buf;
3984  	abi_ulong array;
3985  	abi_ulong __buf;
3986  };
3987  
3988  static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3989                                                 abi_ulong target_addr)
3990  {
3991      int nsems;
3992      unsigned short *array;
3993      union semun semun;
3994      struct semid_ds semid_ds;
3995      int i, ret;
3996  
3997      semun.buf = &semid_ds;
3998  
3999      ret = semctl(semid, 0, IPC_STAT, semun);
4000      if (ret == -1)
4001          return get_errno(ret);
4002  
4003      nsems = semid_ds.sem_nsems;
4004  
4005      *host_array = g_try_new(unsigned short, nsems);
4006      if (!*host_array) {
4007          return -TARGET_ENOMEM;
4008      }
4009      array = lock_user(VERIFY_READ, target_addr,
4010                        nsems*sizeof(unsigned short), 1);
4011      if (!array) {
4012          g_free(*host_array);
4013          return -TARGET_EFAULT;
4014      }
4015  
4016      for(i=0; i<nsems; i++) {
4017          __get_user((*host_array)[i], &array[i]);
4018      }
4019      unlock_user(array, target_addr, 0);
4020  
4021      return 0;
4022  }
4023  
4024  static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4025                                                 unsigned short **host_array)
4026  {
4027      int nsems;
4028      unsigned short *array;
4029      union semun semun;
4030      struct semid_ds semid_ds;
4031      int i, ret;
4032  
4033      semun.buf = &semid_ds;
4034  
4035      ret = semctl(semid, 0, IPC_STAT, semun);
4036      if (ret == -1)
4037          return get_errno(ret);
4038  
4039      nsems = semid_ds.sem_nsems;
4040  
4041      array = lock_user(VERIFY_WRITE, target_addr,
4042                        nsems*sizeof(unsigned short), 0);
4043      if (!array)
4044          return -TARGET_EFAULT;
4045  
4046      for(i=0; i<nsems; i++) {
4047          __put_user((*host_array)[i], &array[i]);
4048      }
4049      g_free(*host_array);
4050      unlock_user(array, target_addr, 1);
4051  
4052      return 0;
4053  }
4054  
4055  static inline abi_long do_semctl(int semid, int semnum, int cmd,
4056                                   abi_ulong target_arg)
4057  {
4058      union target_semun target_su = { .buf = target_arg };
4059      union semun arg;
4060      struct semid_ds dsarg;
4061      unsigned short *array = NULL;
4062      struct seminfo seminfo;
4063      abi_long ret = -TARGET_EINVAL;
4064      abi_long err;
4065      cmd &= 0xff;
4066  
4067      switch( cmd ) {
4068  	case GETVAL:
4069  	case SETVAL:
4070              /* In 64 bit cross-endian situations, we will erroneously pick up
4071               * the wrong half of the union for the "val" element.  To rectify
4072               * this, the entire 8-byte structure is byteswapped, followed by
4073               * a swap of the 4 byte val field. In other cases, the data is
4074               * already in proper host byte order. */
4075  	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4076  		target_su.buf = tswapal(target_su.buf);
4077  		arg.val = tswap32(target_su.val);
4078  	    } else {
4079  		arg.val = target_su.val;
4080  	    }
4081              ret = get_errno(semctl(semid, semnum, cmd, arg));
4082              break;
4083  	case GETALL:
4084  	case SETALL:
4085              err = target_to_host_semarray(semid, &array, target_su.array);
4086              if (err)
4087                  return err;
4088              arg.array = array;
4089              ret = get_errno(semctl(semid, semnum, cmd, arg));
4090              err = host_to_target_semarray(semid, target_su.array, &array);
4091              if (err)
4092                  return err;
4093              break;
4094  	case IPC_STAT:
4095  	case IPC_SET:
4096  	case SEM_STAT:
4097              err = target_to_host_semid_ds(&dsarg, target_su.buf);
4098              if (err)
4099                  return err;
4100              arg.buf = &dsarg;
4101              ret = get_errno(semctl(semid, semnum, cmd, arg));
4102              err = host_to_target_semid_ds(target_su.buf, &dsarg);
4103              if (err)
4104                  return err;
4105              break;
4106  	case IPC_INFO:
4107  	case SEM_INFO:
4108              arg.__buf = &seminfo;
4109              ret = get_errno(semctl(semid, semnum, cmd, arg));
4110              err = host_to_target_seminfo(target_su.__buf, &seminfo);
4111              if (err)
4112                  return err;
4113              break;
4114  	case IPC_RMID:
4115  	case GETPID:
4116  	case GETNCNT:
4117  	case GETZCNT:
4118              ret = get_errno(semctl(semid, semnum, cmd, NULL));
4119              break;
4120      }
4121  
4122      return ret;
4123  }
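/*
 * Example (illustrative, not part of the original source): a guest
 * semctl(semid, 0, GETALL, arr) goes through target_to_host_semarray(),
 * which first issues a host IPC_STAT to learn sem_nsems and then copies
 * that many 16-bit values in from guest memory; after the host semctl()
 * the values are written back by host_to_target_semarray().  Commands such
 * as IPC_RMID, GETPID, GETNCNT and GETZCNT take no payload and are invoked
 * with a NULL argument.
 */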
4124  
4125  struct target_sembuf {
4126      unsigned short sem_num;
4127      short sem_op;
4128      short sem_flg;
4129  };
4130  
4131  static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4132                                               abi_ulong target_addr,
4133                                               unsigned nsops)
4134  {
4135      struct target_sembuf *target_sembuf;
4136      int i;
4137  
4138      target_sembuf = lock_user(VERIFY_READ, target_addr,
4139                                nsops*sizeof(struct target_sembuf), 1);
4140      if (!target_sembuf)
4141          return -TARGET_EFAULT;
4142  
4143      for(i=0; i<nsops; i++) {
4144          __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4145          __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4146          __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4147      }
4148  
4149      unlock_user(target_sembuf, target_addr, 0);
4150  
4151      return 0;
4152  }
4153  
4154  #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155      defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4156  
4157  /*
4158   * This macro is required to handle the s390 variants, which pass the
4159   * arguments in a different order than the default.
4160   */
4161  #ifdef __s390x__
4162  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4163    (__nsops), (__timeout), (__sops)
4164  #else
4165  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4166    (__nsops), 0, (__sops), (__timeout)
4167  #endif
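/*
 * Illustrative expansion (not part of the original source): on s390x the
 * safe_ipc() fallback below becomes
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 * while the default variant expands to
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * matching the argument order the respective kernel sys_ipc() entry points
 * expect.
 */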
4168  
4169  static inline abi_long do_semtimedop(int semid,
4170                                       abi_long ptr,
4171                                       unsigned nsops,
4172                                       abi_long timeout, bool time64)
4173  {
4174      struct sembuf *sops;
4175      struct timespec ts, *pts = NULL;
4176      abi_long ret;
4177  
4178      if (timeout) {
4179          pts = &ts;
4180          if (time64) {
4181              if (target_to_host_timespec64(pts, timeout)) {
4182                  return -TARGET_EFAULT;
4183              }
4184          } else {
4185              if (target_to_host_timespec(pts, timeout)) {
4186                  return -TARGET_EFAULT;
4187              }
4188          }
4189      }
4190  
4191      if (nsops > TARGET_SEMOPM) {
4192          return -TARGET_E2BIG;
4193      }
4194  
4195      sops = g_new(struct sembuf, nsops);
4196  
4197      if (target_to_host_sembuf(sops, ptr, nsops)) {
4198          g_free(sops);
4199          return -TARGET_EFAULT;
4200      }
4201  
4202      ret = -TARGET_ENOSYS;
4203  #ifdef __NR_semtimedop
4204      ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4205  #endif
4206  #ifdef __NR_ipc
4207      if (ret == -TARGET_ENOSYS) {
4208          ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4209                                   SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4210      }
4211  #endif
4212      g_free(sops);
4213      return ret;
4214  }
4215  #endif
4216  
4217  struct target_msqid_ds
4218  {
4219      struct target_ipc_perm msg_perm;
4220      abi_ulong msg_stime;
4221  #if TARGET_ABI_BITS == 32
4222      abi_ulong __unused1;
4223  #endif
4224      abi_ulong msg_rtime;
4225  #if TARGET_ABI_BITS == 32
4226      abi_ulong __unused2;
4227  #endif
4228      abi_ulong msg_ctime;
4229  #if TARGET_ABI_BITS == 32
4230      abi_ulong __unused3;
4231  #endif
4232      abi_ulong __msg_cbytes;
4233      abi_ulong msg_qnum;
4234      abi_ulong msg_qbytes;
4235      abi_ulong msg_lspid;
4236      abi_ulong msg_lrpid;
4237      abi_ulong __unused4;
4238      abi_ulong __unused5;
4239  };
4240  
4241  static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4242                                                 abi_ulong target_addr)
4243  {
4244      struct target_msqid_ds *target_md;
4245  
4246      if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4247          return -TARGET_EFAULT;
4248      if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4249          return -TARGET_EFAULT;
4250      host_md->msg_stime = tswapal(target_md->msg_stime);
4251      host_md->msg_rtime = tswapal(target_md->msg_rtime);
4252      host_md->msg_ctime = tswapal(target_md->msg_ctime);
4253      host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4254      host_md->msg_qnum = tswapal(target_md->msg_qnum);
4255      host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4256      host_md->msg_lspid = tswapal(target_md->msg_lspid);
4257      host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4258      unlock_user_struct(target_md, target_addr, 0);
4259      return 0;
4260  }
4261  
4262  static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4263                                                 struct msqid_ds *host_md)
4264  {
4265      struct target_msqid_ds *target_md;
4266  
4267      if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4268          return -TARGET_EFAULT;
4269      if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4270          return -TARGET_EFAULT;
4271      target_md->msg_stime = tswapal(host_md->msg_stime);
4272      target_md->msg_rtime = tswapal(host_md->msg_rtime);
4273      target_md->msg_ctime = tswapal(host_md->msg_ctime);
4274      target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4275      target_md->msg_qnum = tswapal(host_md->msg_qnum);
4276      target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4277      target_md->msg_lspid = tswapal(host_md->msg_lspid);
4278      target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4279      unlock_user_struct(target_md, target_addr, 1);
4280      return 0;
4281  }
4282  
4283  struct target_msginfo {
4284      int msgpool;
4285      int msgmap;
4286      int msgmax;
4287      int msgmnb;
4288      int msgmni;
4289      int msgssz;
4290      int msgtql;
4291      unsigned short int msgseg;
4292  };
4293  
4294  static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4295                                                struct msginfo *host_msginfo)
4296  {
4297      struct target_msginfo *target_msginfo;
4298      if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4299          return -TARGET_EFAULT;
4300      __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4301      __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4302      __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4303      __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4304      __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4305      __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4306      __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4307      __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4308      unlock_user_struct(target_msginfo, target_addr, 1);
4309      return 0;
4310  }
4311  
4312  static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4313  {
4314      struct msqid_ds dsarg;
4315      struct msginfo msginfo;
4316      abi_long ret = -TARGET_EINVAL;
4317  
4318      cmd &= 0xff;
4319  
4320      switch (cmd) {
4321      case IPC_STAT:
4322      case IPC_SET:
4323      case MSG_STAT:
4324          if (target_to_host_msqid_ds(&dsarg,ptr))
4325              return -TARGET_EFAULT;
4326          ret = get_errno(msgctl(msgid, cmd, &dsarg));
4327          if (host_to_target_msqid_ds(ptr,&dsarg))
4328              return -TARGET_EFAULT;
4329          break;
4330      case IPC_RMID:
4331          ret = get_errno(msgctl(msgid, cmd, NULL));
4332          break;
4333      case IPC_INFO:
4334      case MSG_INFO:
4335          ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4336          if (host_to_target_msginfo(ptr, &msginfo))
4337              return -TARGET_EFAULT;
4338          break;
4339      }
4340  
4341      return ret;
4342  }
4343  
4344  struct target_msgbuf {
4345      abi_long mtype;
4346      char	mtext[1];
4347  };
4348  
4349  static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4350                                   ssize_t msgsz, int msgflg)
4351  {
4352      struct target_msgbuf *target_mb;
4353      struct msgbuf *host_mb;
4354      abi_long ret = 0;
4355  
4356      if (msgsz < 0) {
4357          return -TARGET_EINVAL;
4358      }
4359  
4360      if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4361          return -TARGET_EFAULT;
4362      host_mb = g_try_malloc(msgsz + sizeof(long));
4363      if (!host_mb) {
4364          unlock_user_struct(target_mb, msgp, 0);
4365          return -TARGET_ENOMEM;
4366      }
4367      host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4368      memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4369      ret = -TARGET_ENOSYS;
4370  #ifdef __NR_msgsnd
4371      ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4372  #endif
4373  #ifdef __NR_ipc
4374      if (ret == -TARGET_ENOSYS) {
4375  #ifdef __s390x__
4376          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4377                                   host_mb));
4378  #else
4379          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4380                                   host_mb, 0));
4381  #endif
4382      }
4383  #endif
4384      g_free(host_mb);
4385      unlock_user_struct(target_mb, msgp, 0);
4386  
4387      return ret;
4388  }
4389  
4390  #ifdef __NR_ipc
4391  #if defined(__sparc__)
4392  /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4393  #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4394  #elif defined(__s390x__)
4395  /* The s390 sys_ipc variant has only five parameters.  */
4396  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4397      ((long int[]){(long int)__msgp, __msgtyp})
4398  #else
4399  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4400      ((long int[]){(long int)__msgp, __msgtyp}), 0
4401  #endif
4402  #endif
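/*
 * Illustrative expansion (not part of the original source): with the
 * default variant, the safe_ipc() fallback in do_msgrcv() becomes
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            ((long int[]){(long int)host_mb, msgtyp}), 0);
 * i.e. the message pointer and type are packed into a small array, the
 * historical "kludge" layout of sys_ipc(MSGRCV).  SPARC passes the two
 * values directly instead, and s390x passes the array without the trailing
 * argument.
 */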
4403  
4404  static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4405                                   ssize_t msgsz, abi_long msgtyp,
4406                                   int msgflg)
4407  {
4408      struct target_msgbuf *target_mb;
4409      char *target_mtext;
4410      struct msgbuf *host_mb;
4411      abi_long ret = 0;
4412  
4413      if (msgsz < 0) {
4414          return -TARGET_EINVAL;
4415      }
4416  
4417      if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4418          return -TARGET_EFAULT;
4419  
4420      host_mb = g_try_malloc(msgsz + sizeof(long));
4421      if (!host_mb) {
4422          ret = -TARGET_ENOMEM;
4423          goto end;
4424      }
4425      ret = -TARGET_ENOSYS;
4426  #ifdef __NR_msgrcv
4427      ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4428  #endif
4429  #ifdef __NR_ipc
4430      if (ret == -TARGET_ENOSYS) {
4431          ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4432                          msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4433      }
4434  #endif
4435  
4436      if (ret > 0) {
4437          abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4438          target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4439          if (!target_mtext) {
4440              ret = -TARGET_EFAULT;
4441              goto end;
4442          }
4443          memcpy(target_mb->mtext, host_mb->mtext, ret);
4444          unlock_user(target_mtext, target_mtext_addr, ret);
4445      }
4446  
4447      target_mb->mtype = tswapal(host_mb->mtype);
4448  
4449  end:
4450      if (target_mb)
4451          unlock_user_struct(target_mb, msgp, 1);
4452      g_free(host_mb);
4453      return ret;
4454  }
4455  
4456  static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4457                                                 abi_ulong target_addr)
4458  {
4459      struct target_shmid_ds *target_sd;
4460  
4461      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4462          return -TARGET_EFAULT;
4463      if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4464          return -TARGET_EFAULT;
4465      __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4466      __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4467      __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4468      __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4469      __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4470      __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4471      __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4472      unlock_user_struct(target_sd, target_addr, 0);
4473      return 0;
4474  }
4475  
4476  static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4477                                                 struct shmid_ds *host_sd)
4478  {
4479      struct target_shmid_ds *target_sd;
4480  
4481      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4482          return -TARGET_EFAULT;
4483      if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4484          return -TARGET_EFAULT;
4485      __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4486      __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4487      __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4488      __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4489      __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4490      __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4491      __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4492      unlock_user_struct(target_sd, target_addr, 1);
4493      return 0;
4494  }
4495  
4496  struct  target_shminfo {
4497      abi_ulong shmmax;
4498      abi_ulong shmmin;
4499      abi_ulong shmmni;
4500      abi_ulong shmseg;
4501      abi_ulong shmall;
4502  };
4503  
4504  static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4505                                                struct shminfo *host_shminfo)
4506  {
4507      struct target_shminfo *target_shminfo;
4508      if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4509          return -TARGET_EFAULT;
4510      __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4511      __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4512      __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4513      __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4514      __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4515      unlock_user_struct(target_shminfo, target_addr, 1);
4516      return 0;
4517  }
4518  
4519  struct target_shm_info {
4520      int used_ids;
4521      abi_ulong shm_tot;
4522      abi_ulong shm_rss;
4523      abi_ulong shm_swp;
4524      abi_ulong swap_attempts;
4525      abi_ulong swap_successes;
4526  };
4527  
4528  static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4529                                                 struct shm_info *host_shm_info)
4530  {
4531      struct target_shm_info *target_shm_info;
4532      if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4533          return -TARGET_EFAULT;
4534      __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4535      __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4536      __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4537      __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4538      __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4539      __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4540      unlock_user_struct(target_shm_info, target_addr, 1);
4541      return 0;
4542  }
4543  
4544  static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4545  {
4546      struct shmid_ds dsarg;
4547      struct shminfo shminfo;
4548      struct shm_info shm_info;
4549      abi_long ret = -TARGET_EINVAL;
4550  
4551      cmd &= 0xff;
4552  
4553      switch(cmd) {
4554      case IPC_STAT:
4555      case IPC_SET:
4556      case SHM_STAT:
4557          if (target_to_host_shmid_ds(&dsarg, buf))
4558              return -TARGET_EFAULT;
4559          ret = get_errno(shmctl(shmid, cmd, &dsarg));
4560          if (host_to_target_shmid_ds(buf, &dsarg))
4561              return -TARGET_EFAULT;
4562          break;
4563      case IPC_INFO:
4564          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4565          if (host_to_target_shminfo(buf, &shminfo))
4566              return -TARGET_EFAULT;
4567          break;
4568      case SHM_INFO:
4569          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4570          if (host_to_target_shm_info(buf, &shm_info))
4571              return -TARGET_EFAULT;
4572          break;
4573      case IPC_RMID:
4574      case SHM_LOCK:
4575      case SHM_UNLOCK:
4576          ret = get_errno(shmctl(shmid, cmd, NULL));
4577          break;
4578      }
4579  
4580      return ret;
4581  }
4582  
4583  #ifndef TARGET_FORCE_SHMLBA
4584  /* For most architectures, SHMLBA is the same as the page size;
4585   * some architectures have larger values, in which case they should
4586   * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4587   * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4588   * and defining its own value for SHMLBA.
4589   *
4590   * The kernel also permits SHMLBA to be set by the architecture to a
4591   * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4592   * this means that addresses are rounded to the large size if
4593   * SHM_RND is set but addresses not aligned to that size are not rejected
4594   * as long as they are at least page-aligned. Since the only architecture
4595   * which uses this is ia64 this code doesn't provide for that oddity.
4596   */
4597  static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4598  {
4599      return TARGET_PAGE_SIZE;
4600  }
4601  #endif
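
      /*
       * Illustrative sketch only, not taken from any particular target: an
       * architecture with a larger SHMLBA would define TARGET_FORCE_SHMLBA in
       * its target_syscall.h and provide its own target_shmlba(), along the
       * lines of the following (the 4 * TARGET_PAGE_SIZE value is purely
       * hypothetical):
       *
       *   #define TARGET_FORCE_SHMLBA 1
       *
       *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
       *   {
       *       return 4 * TARGET_PAGE_SIZE;
       *   }
       */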
4602  
4603  static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4604                                   int shmid, abi_ulong shmaddr, int shmflg)
4605  {
4606      abi_long raddr;
4607      void *host_raddr;
4608      struct shmid_ds shm_info;
4609      int i, ret;
4610      abi_ulong shmlba;
4611  
4612      /* shmat pointers are always untagged */
4613  
4614      /* find out the length of the shared memory segment */
4615      ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4616      if (is_error(ret)) {
4617          /* can't get length, bail out */
4618          return ret;
4619      }
4620  
4621      shmlba = target_shmlba(cpu_env);
4622  
4623      if (shmaddr & (shmlba - 1)) {
4624          if (shmflg & SHM_RND) {
4625              shmaddr &= ~(shmlba - 1);
4626          } else {
4627              return -TARGET_EINVAL;
4628          }
4629      }
4630      if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4631          return -TARGET_EINVAL;
4632      }
4633  
4634      mmap_lock();
4635  
4636      if (shmaddr)
4637          host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4638      else {
4639          abi_ulong mmap_start;
4640  
4641          /* In order to use the host shmat, we need to honor host SHMLBA.  */
4642          mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4643  
4644          if (mmap_start == -1) {
4645              errno = ENOMEM;
4646              host_raddr = (void *)-1;
4647          } else
4648              host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4649                                 shmflg | SHM_REMAP);
4650      }
4651  
4652      if (host_raddr == (void *)-1) {
4653          mmap_unlock();
4654          return get_errno((long)host_raddr);
4655      }
4656      raddr = h2g((unsigned long)host_raddr);
4657  
4658      page_set_flags(raddr, raddr + shm_info.shm_segsz,
4659                     PAGE_VALID | PAGE_RESET | PAGE_READ |
4660                     (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4661  
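          /*
           * Record the mapping in a free shm_regions[] slot so that a later
           * do_shmdt() on this address can clear the page flags again.
           */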
4662      for (i = 0; i < N_SHM_REGIONS; i++) {
4663          if (!shm_regions[i].in_use) {
4664              shm_regions[i].in_use = true;
4665              shm_regions[i].start = raddr;
4666              shm_regions[i].size = shm_info.shm_segsz;
4667              break;
4668          }
4669      }
4670  
4671      mmap_unlock();
4672      return raddr;
4673  
4674  }
4675  
4676  static inline abi_long do_shmdt(abi_ulong shmaddr)
4677  {
4678      int i;
4679      abi_long rv;
4680  
4681      /* shmdt pointers are always untagged */
4682  
4683      mmap_lock();
4684  
4685      for (i = 0; i < N_SHM_REGIONS; ++i) {
4686          if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4687              shm_regions[i].in_use = false;
4688              page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4689              break;
4690          }
4691      }
4692      rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4693  
4694      mmap_unlock();
4695  
4696      return rv;
4697  }
4698  
4699  #ifdef TARGET_NR_ipc
4700  /* ??? This only works with linear mappings.  */
4701  /* do_ipc() must return target values and target errnos. */
4702  static abi_long do_ipc(CPUArchState *cpu_env,
4703                         unsigned int call, abi_long first,
4704                         abi_long second, abi_long third,
4705                         abi_long ptr, abi_long fifth)
4706  {
4707      int version;
4708      abi_long ret = 0;
4709  
4710      version = call >> 16;
4711      call &= 0xffff;
4712  
4713      switch (call) {
4714      case IPCOP_semop:
4715          ret = do_semtimedop(first, ptr, second, 0, false);
4716          break;
4717      case IPCOP_semtimedop:
4718      /*
4719       * The s390 sys_ipc variant has only five parameters instead of six
4720       * (as in the default variant); the only difference is the handling of
4721       * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4722       * to a struct timespec, while the generic variant uses the fifth parameter.
4723       */
4724  #if defined(TARGET_S390X)
4725          ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4726  #else
4727          ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4728  #endif
4729          break;
4730  
4731      case IPCOP_semget:
4732          ret = get_errno(semget(first, second, third));
4733          break;
4734  
4735      case IPCOP_semctl: {
4736          /* The semun argument to semctl is passed by value, so dereference the
4737           * ptr argument. */
4738          abi_ulong atptr;
4739          get_user_ual(atptr, ptr);
4740          ret = do_semctl(first, second, third, atptr);
4741          break;
4742      }
4743  
4744      case IPCOP_msgget:
4745          ret = get_errno(msgget(first, second));
4746          break;
4747  
4748      case IPCOP_msgsnd:
4749          ret = do_msgsnd(first, ptr, second, third);
4750          break;
4751  
4752      case IPCOP_msgctl:
4753          ret = do_msgctl(first, second, ptr);
4754          break;
4755  
4756      case IPCOP_msgrcv:
4757          switch (version) {
4758          case 0:
4759              {
4760                  struct target_ipc_kludge {
4761                      abi_long msgp;
4762                      abi_long msgtyp;
4763                  } *tmp;
4764  
4765                  if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4766                      ret = -TARGET_EFAULT;
4767                      break;
4768                  }
4769  
4770                  ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4771  
4772                  unlock_user_struct(tmp, ptr, 0);
4773                  break;
4774              }
4775          default:
4776              ret = do_msgrcv(first, ptr, second, fifth, third);
4777          }
4778          break;
4779  
4780      case IPCOP_shmat:
4781          switch (version) {
4782          default:
4783          {
4784              abi_ulong raddr;
4785              raddr = do_shmat(cpu_env, first, ptr, second);
4786              if (is_error(raddr))
4787                  return get_errno(raddr);
4788              if (put_user_ual(raddr, third))
4789                  return -TARGET_EFAULT;
4790              break;
4791          }
4792          case 1:
4793              ret = -TARGET_EINVAL;
4794              break;
4795          }
4796          break;
4797      case IPCOP_shmdt:
4798          ret = do_shmdt(ptr);
4799          break;
4800  
4801      case IPCOP_shmget:
4802          /* IPC_* flag values are the same on all linux platforms */
4803          ret = get_errno(shmget(first, second, third));
4804          break;
4805  
4806      /* IPC_* and SHM_* command values are the same on all linux platforms */
4807      case IPCOP_shmctl:
4808          ret = do_shmctl(first, second, ptr);
4809          break;
4810      default:
4811          qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4812                        call, version);
4813          ret = -TARGET_ENOSYS;
4814          break;
4815      }
4816      return ret;
4817  }
4818  #endif
4819  
4820  /* kernel structure types definitions */
4821  
4822  #define STRUCT(name, ...) STRUCT_ ## name,
4823  #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4824  enum {
4825  #include "syscall_types.h"
4826  STRUCT_MAX
4827  };
4828  #undef STRUCT
4829  #undef STRUCT_SPECIAL
4830  
4831  #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4832  #define STRUCT_SPECIAL(name)
4833  #include "syscall_types.h"
4834  #undef STRUCT
4835  #undef STRUCT_SPECIAL
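
      /*
       * The two passes over syscall_types.h above generate, respectively, an
       * enum of STRUCT_* identifiers and one argtype descriptor array per
       * structure.  As a hedged illustration (the exact entry may differ from
       * what syscall_types.h really contains), a line such as
       *
       *   STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
       *
       * expands first to the enum member STRUCT_winsize and then to
       *
       *   static const argtype struct_winsize_def[] =
       *       { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
       */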
4836  
4837  #define MAX_STRUCT_SIZE 4096
4838  
4839  #ifdef CONFIG_FIEMAP
4840  /* So fiemap access checks don't overflow on 32 bit systems.
4841   * This is very slightly smaller than the limit imposed by
4842   * the underlying kernel.
4843   */
4844  #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4845                              / sizeof(struct fiemap_extent))
4846  
4847  static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4848                                         int fd, int cmd, abi_long arg)
4849  {
4850      /* The parameter for this ioctl is a struct fiemap followed
4851       * by an array of struct fiemap_extent whose size is set
4852       * in fiemap->fm_extent_count. The array is filled in by the
4853       * ioctl.
4854       */
4855      int target_size_in, target_size_out;
4856      struct fiemap *fm;
4857      const argtype *arg_type = ie->arg_type;
4858      const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4859      void *argptr, *p;
4860      abi_long ret;
4861      int i, extent_size = thunk_type_size(extent_arg_type, 0);
4862      uint32_t outbufsz;
4863      int free_fm = 0;
4864  
4865      assert(arg_type[0] == TYPE_PTR);
4866      assert(ie->access == IOC_RW);
4867      arg_type++;
4868      target_size_in = thunk_type_size(arg_type, 0);
4869      argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4870      if (!argptr) {
4871          return -TARGET_EFAULT;
4872      }
4873      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4874      unlock_user(argptr, arg, 0);
4875      fm = (struct fiemap *)buf_temp;
4876      if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4877          return -TARGET_EINVAL;
4878      }
4879  
4880      outbufsz = sizeof (*fm) +
4881          (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4882  
4883      if (outbufsz > MAX_STRUCT_SIZE) {
4884          /* We can't fit all the extents into the fixed size buffer.
4885           * Allocate one that is large enough and use it instead.
4886           */
4887          fm = g_try_malloc(outbufsz);
4888          if (!fm) {
4889              return -TARGET_ENOMEM;
4890          }
4891          memcpy(fm, buf_temp, sizeof(struct fiemap));
4892          free_fm = 1;
4893      }
4894      ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4895      if (!is_error(ret)) {
4896          target_size_out = target_size_in;
4897          /* An extent_count of 0 means we were only counting the extents
4898           * so there are no structs to copy
4899           */
4900          if (fm->fm_extent_count != 0) {
4901              target_size_out += fm->fm_mapped_extents * extent_size;
4902          }
4903          argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4904          if (!argptr) {
4905              ret = -TARGET_EFAULT;
4906          } else {
4907              /* Convert the struct fiemap */
4908              thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4909              if (fm->fm_extent_count != 0) {
4910                  p = argptr + target_size_in;
4911                  /* ...and then all the struct fiemap_extents */
4912                  for (i = 0; i < fm->fm_mapped_extents; i++) {
4913                      thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4914                                    THUNK_TARGET);
4915                      p += extent_size;
4916                  }
4917              }
4918              unlock_user(argptr, arg, target_size_out);
4919          }
4920      }
4921      if (free_fm) {
4922          g_free(fm);
4923      }
4924      return ret;
4925  }
4926  #endif
4927  
4928  static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4929                                  int fd, int cmd, abi_long arg)
4930  {
4931      const argtype *arg_type = ie->arg_type;
4932      int target_size;
4933      void *argptr;
4934      int ret;
4935      struct ifconf *host_ifconf;
4936      uint32_t outbufsz;
4937      const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4938      const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4939      int target_ifreq_size;
4940      int nb_ifreq;
4941      int free_buf = 0;
4942      int i;
4943      int target_ifc_len;
4944      abi_long target_ifc_buf;
4945      int host_ifc_len;
4946      char *host_ifc_buf;
4947  
4948      assert(arg_type[0] == TYPE_PTR);
4949      assert(ie->access == IOC_RW);
4950  
4951      arg_type++;
4952      target_size = thunk_type_size(arg_type, 0);
4953  
4954      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4955      if (!argptr)
4956          return -TARGET_EFAULT;
4957      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4958      unlock_user(argptr, arg, 0);
4959  
4960      host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4961      target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4962      target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4963  
4964      if (target_ifc_buf != 0) {
4965          target_ifc_len = host_ifconf->ifc_len;
4966          nb_ifreq = target_ifc_len / target_ifreq_size;
4967          host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4968  
4969          outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4970          if (outbufsz > MAX_STRUCT_SIZE) {
4971              /*
4972               * We can't fit all the ifreq entries into the fixed size buffer.
4973               * Allocate one that is large enough and use it instead.
4974               */
4975              host_ifconf = malloc(outbufsz);
4976              if (!host_ifconf) {
4977                  return -TARGET_ENOMEM;
4978              }
4979              memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4980              free_buf = 1;
4981          }
4982          host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4983  
4984          host_ifconf->ifc_len = host_ifc_len;
4985      } else {
4986        host_ifc_buf = NULL;
4987          host_ifc_buf = NULL;
4988      host_ifconf->ifc_buf = host_ifc_buf;
4989  
4990      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4991      if (!is_error(ret)) {
4992          /* convert host ifc_len to target ifc_len */
4993  
4994          nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4995          target_ifc_len = nb_ifreq * target_ifreq_size;
4996          host_ifconf->ifc_len = target_ifc_len;
4997  
4998          /* restore target ifc_buf */
4999  
5000          host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5001  
5002          /* copy struct ifconf to target user */
5003  
5004          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5005          if (!argptr)
5006              return -TARGET_EFAULT;
5007          thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5008          unlock_user(argptr, arg, target_size);
5009  
5010          if (target_ifc_buf != 0) {
5011              /* copy ifreq[] to target user */
5012              argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5013              for (i = 0; i < nb_ifreq; i++) {
5014                  thunk_convert(argptr + i * target_ifreq_size,
5015                                host_ifc_buf + i * sizeof(struct ifreq),
5016                                ifreq_arg_type, THUNK_TARGET);
5017              }
5018              unlock_user(argptr, target_ifc_buf, target_ifc_len);
5019          }
5020      }
5021  
5022      if (free_buf) {
5023          free(host_ifconf);
5024      }
5025  
5026      return ret;
5027  }
5028  
5029  #if defined(CONFIG_USBFS)
5030  #if HOST_LONG_BITS > 64
5031  #error USBDEVFS thunks do not support >64 bit hosts yet.
5032  #endif
5033  struct live_urb {
5034      uint64_t target_urb_adr;
5035      uint64_t target_buf_adr;
5036      char *target_buf_ptr;
5037      struct usbdevfs_urb host_urb;
5038  };
5039  
5040  static GHashTable *usbdevfs_urb_hashtable(void)
5041  {
5042      static GHashTable *urb_hashtable;
5043  
5044      if (!urb_hashtable) {
5045          urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5046      }
5047      return urb_hashtable;
5048  }
5049  
5050  static void urb_hashtable_insert(struct live_urb *urb)
5051  {
5052      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5053      g_hash_table_insert(urb_hashtable, urb, urb);
5054  }
5055  
5056  static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5057  {
5058      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5059      return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5060  }
5061  
5062  static void urb_hashtable_remove(struct live_urb *urb)
5063  {
5064      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5065      g_hash_table_remove(urb_hashtable, urb);
5066  }
5067  
5068  static abi_long
5069  do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5070                            int fd, int cmd, abi_long arg)
5071  {
5072      const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5073      const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5074      struct live_urb *lurb;
5075      void *argptr;
5076      uint64_t hurb;
5077      int target_size;
5078      uintptr_t target_urb_adr;
5079      abi_long ret;
5080  
5081      target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5082  
5083      memset(buf_temp, 0, sizeof(uint64_t));
5084      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5085      if (is_error(ret)) {
5086          return ret;
5087      }
5088  
5089      memcpy(&hurb, buf_temp, sizeof(uint64_t));
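          /*
           * The kernel returned a pointer to host_urb, which is embedded in a
           * struct live_urb; step back by its offset to recover the wrapper.
           */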
5090      lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5091      if (!lurb->target_urb_adr) {
5092          return -TARGET_EFAULT;
5093      }
5094      urb_hashtable_remove(lurb);
5095      unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5096          lurb->host_urb.buffer_length);
5097      lurb->target_buf_ptr = NULL;
5098  
5099      /* restore the guest buffer pointer */
5100      lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5101  
5102      /* update the guest urb struct */
5103      argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5104      if (!argptr) {
5105          g_free(lurb);
5106          return -TARGET_EFAULT;
5107      }
5108      thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5109      unlock_user(argptr, lurb->target_urb_adr, target_size);
5110  
5111      target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5112      /* write back the urb handle */
5113      argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5114      if (!argptr) {
5115          g_free(lurb);
5116          return -TARGET_EFAULT;
5117      }
5118  
5119      /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5120      target_urb_adr = lurb->target_urb_adr;
5121      thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5122      unlock_user(argptr, arg, target_size);
5123  
5124      g_free(lurb);
5125      return ret;
5126  }
5127  
5128  static abi_long
5129  do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5130                               uint8_t *buf_temp __attribute__((unused)),
5131                               int fd, int cmd, abi_long arg)
5132  {
5133      struct live_urb *lurb;
5134  
5135      /* map target address back to host URB with metadata. */
5136      lurb = urb_hashtable_lookup(arg);
5137      if (!lurb) {
5138          return -TARGET_EFAULT;
5139      }
5140      return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5141  }
5142  
5143  static abi_long
5144  do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5145                              int fd, int cmd, abi_long arg)
5146  {
5147      const argtype *arg_type = ie->arg_type;
5148      int target_size;
5149      abi_long ret;
5150      void *argptr;
5151      int rw_dir;
5152      struct live_urb *lurb;
5153  
5154      /*
5155       * Each submitted URB needs to map to a unique ID for the
5156       * kernel, and that unique ID needs to be a pointer to
5157       * host memory.  Hence, we need to malloc for each URB.
5158       * Isochronous transfers have a variable-length struct.
5159       */
5160      arg_type++;
5161      target_size = thunk_type_size(arg_type, THUNK_TARGET);
5162  
5163      /* construct host copy of urb and metadata */
5164      lurb = g_try_malloc0(sizeof(struct live_urb));
5165      if (!lurb) {
5166          return -TARGET_ENOMEM;
5167      }
5168  
5169      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5170      if (!argptr) {
5171          g_free(lurb);
5172          return -TARGET_EFAULT;
5173      }
5174      thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5175      unlock_user(argptr, arg, 0);
5176  
5177      lurb->target_urb_adr = arg;
5178      lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5179  
5180      /* buffer space used depends on endpoint type so lock the entire buffer */
5181      /* control type urbs should check the buffer contents for true direction */
5182      rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5183      lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5184          lurb->host_urb.buffer_length, 1);
5185      if (lurb->target_buf_ptr == NULL) {
5186          g_free(lurb);
5187          return -TARGET_EFAULT;
5188      }
5189  
5190      /* update buffer pointer in host copy */
5191      lurb->host_urb.buffer = lurb->target_buf_ptr;
5192  
5193      ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5194      if (is_error(ret)) {
5195          unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5196          g_free(lurb);
5197      } else {
5198          urb_hashtable_insert(lurb);
5199      }
5200  
5201      return ret;
5202  }
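
      /*
       * Descriptive note, derived from the three handlers above: SUBMITURB
       * allocates a live_urb, locks the guest buffer and inserts the URB into
       * the hash table keyed by its guest address; REAPURB recovers the
       * live_urb from the host_urb pointer the kernel hands back, copies the
       * converted URB back to the guest and frees the wrapper; DISCARDURB
       * only looks the URB up by guest address and forwards the ioctl, since
       * a discarded URB is still reported through a later reap.
       */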
5203  #endif /* CONFIG_USBFS */
5204  
5205  static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5206                              int cmd, abi_long arg)
5207  {
5208      void *argptr;
5209      struct dm_ioctl *host_dm;
5210      abi_long guest_data;
5211      uint32_t guest_data_size;
5212      int target_size;
5213      const argtype *arg_type = ie->arg_type;
5214      abi_long ret;
5215      void *big_buf = NULL;
5216      char *host_data;
5217  
5218      arg_type++;
5219      target_size = thunk_type_size(arg_type, 0);
5220      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5221      if (!argptr) {
5222          ret = -TARGET_EFAULT;
5223          goto out;
5224      }
5225      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5226      unlock_user(argptr, arg, 0);
5227  
5228      /* buf_temp is too small, so fetch things into a bigger buffer */
5229      big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5230      memcpy(big_buf, buf_temp, target_size);
5231      buf_temp = big_buf;
5232      host_dm = big_buf;
5233  
5234      guest_data = arg + host_dm->data_start;
5235      if ((guest_data - arg) < 0) {
5236          ret = -TARGET_EINVAL;
5237          goto out;
5238      }
5239      guest_data_size = host_dm->data_size - host_dm->data_start;
5240      host_data = (char*)host_dm + host_dm->data_start;
5241  
5242      argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5243      if (!argptr) {
5244          ret = -TARGET_EFAULT;
5245          goto out;
5246      }
5247  
5248      switch (ie->host_cmd) {
5249      case DM_REMOVE_ALL:
5250      case DM_LIST_DEVICES:
5251      case DM_DEV_CREATE:
5252      case DM_DEV_REMOVE:
5253      case DM_DEV_SUSPEND:
5254      case DM_DEV_STATUS:
5255      case DM_DEV_WAIT:
5256      case DM_TABLE_STATUS:
5257      case DM_TABLE_CLEAR:
5258      case DM_TABLE_DEPS:
5259      case DM_LIST_VERSIONS:
5260          /* no input data */
5261          break;
5262      case DM_DEV_RENAME:
5263      case DM_DEV_SET_GEOMETRY:
5264          /* data contains only strings */
5265          memcpy(host_data, argptr, guest_data_size);
5266          break;
5267      case DM_TARGET_MSG:
5268          memcpy(host_data, argptr, guest_data_size);
5269          *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5270          break;
5271      case DM_TABLE_LOAD:
5272      {
5273          void *gspec = argptr;
5274          void *cur_data = host_data;
5275          const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5276          int spec_size = thunk_type_size(arg_type, 0);
5277          int i;
5278  
5279          for (i = 0; i < host_dm->target_count; i++) {
5280              struct dm_target_spec *spec = cur_data;
5281              uint32_t next;
5282              int slen;
5283  
5284              thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5285              slen = strlen((char*)gspec + spec_size) + 1;
5286              next = spec->next;
5287              spec->next = sizeof(*spec) + slen;
5288              strcpy((char*)&spec[1], gspec + spec_size);
5289              gspec += next;
5290              cur_data += spec->next;
5291          }
5292          break;
5293      }
5294      default:
5295          ret = -TARGET_EINVAL;
5296          unlock_user(argptr, guest_data, 0);
5297          goto out;
5298      }
5299      unlock_user(argptr, guest_data, 0);
5300  
5301      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5302      if (!is_error(ret)) {
5303          guest_data = arg + host_dm->data_start;
5304          guest_data_size = host_dm->data_size - host_dm->data_start;
5305          argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5306          switch (ie->host_cmd) {
5307          case DM_REMOVE_ALL:
5308          case DM_DEV_CREATE:
5309          case DM_DEV_REMOVE:
5310          case DM_DEV_RENAME:
5311          case DM_DEV_SUSPEND:
5312          case DM_DEV_STATUS:
5313          case DM_TABLE_LOAD:
5314          case DM_TABLE_CLEAR:
5315          case DM_TARGET_MSG:
5316          case DM_DEV_SET_GEOMETRY:
5317              /* no return data */
5318              break;
5319          case DM_LIST_DEVICES:
5320          {
5321              struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5322              uint32_t remaining_data = guest_data_size;
5323              void *cur_data = argptr;
5324              const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5325              int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5326  
5327              while (1) {
5328                  uint32_t next = nl->next;
5329                  if (next) {
5330                      nl->next = nl_size + (strlen(nl->name) + 1);
5331                  }
5332                  if (remaining_data < nl->next) {
5333                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5334                      break;
5335                  }
5336                  thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5337                  strcpy(cur_data + nl_size, nl->name);
5338                  cur_data += nl->next;
5339                  remaining_data -= nl->next;
5340                  if (!next) {
5341                      break;
5342                  }
5343                  nl = (void*)nl + next;
5344              }
5345              break;
5346          }
5347          case DM_DEV_WAIT:
5348          case DM_TABLE_STATUS:
5349          {
5350              struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5351              void *cur_data = argptr;
5352              const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5353              int spec_size = thunk_type_size(arg_type, 0);
5354              int i;
5355  
5356              for (i = 0; i < host_dm->target_count; i++) {
5357                  uint32_t next = spec->next;
5358                  int slen = strlen((char*)&spec[1]) + 1;
5359                  spec->next = (cur_data - argptr) + spec_size + slen;
5360                  if (guest_data_size < spec->next) {
5361                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5362                      break;
5363                  }
5364                  thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5365                  strcpy(cur_data + spec_size, (char*)&spec[1]);
5366                  cur_data = argptr + spec->next;
5367                  spec = (void*)host_dm + host_dm->data_start + next;
5368              }
5369              break;
5370          }
5371          case DM_TABLE_DEPS:
5372          {
5373              void *hdata = (void*)host_dm + host_dm->data_start;
5374              int count = *(uint32_t*)hdata;
5375              uint64_t *hdev = hdata + 8;
5376              uint64_t *gdev = argptr + 8;
5377              int i;
5378  
5379              *(uint32_t*)argptr = tswap32(count);
5380              for (i = 0; i < count; i++) {
5381                  *gdev = tswap64(*hdev);
5382                  gdev++;
5383                  hdev++;
5384              }
5385              break;
5386          }
5387          case DM_LIST_VERSIONS:
5388          {
5389              struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5390              uint32_t remaining_data = guest_data_size;
5391              void *cur_data = argptr;
5392              const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5393              int vers_size = thunk_type_size(arg_type, 0);
5394  
5395              while (1) {
5396                  uint32_t next = vers->next;
5397                  if (next) {
5398                      vers->next = vers_size + (strlen(vers->name) + 1);
5399                  }
5400                  if (remaining_data < vers->next) {
5401                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5402                      break;
5403                  }
5404                  thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5405                  strcpy(cur_data + vers_size, vers->name);
5406                  cur_data += vers->next;
5407                  remaining_data -= vers->next;
5408                  if (!next) {
5409                      break;
5410                  }
5411                  vers = (void*)vers + next;
5412              }
5413              break;
5414          }
5415          default:
5416              unlock_user(argptr, guest_data, 0);
5417              ret = -TARGET_EINVAL;
5418              goto out;
5419          }
5420          unlock_user(argptr, guest_data, guest_data_size);
5421  
5422          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5423          if (!argptr) {
5424              ret = -TARGET_EFAULT;
5425              goto out;
5426          }
5427          thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5428          unlock_user(argptr, arg, target_size);
5429      }
5430  out:
5431      g_free(big_buf);
5432      return ret;
5433  }
5434  
5435  static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5436                                 int cmd, abi_long arg)
5437  {
5438      void *argptr;
5439      int target_size;
5440      const argtype *arg_type = ie->arg_type;
5441      const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5442      abi_long ret;
5443  
5444      struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5445      struct blkpg_partition host_part;
5446  
5447      /* Read and convert blkpg */
5448      arg_type++;
5449      target_size = thunk_type_size(arg_type, 0);
5450      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5451      if (!argptr) {
5452          ret = -TARGET_EFAULT;
5453          goto out;
5454      }
5455      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5456      unlock_user(argptr, arg, 0);
5457  
5458      switch (host_blkpg->op) {
5459      case BLKPG_ADD_PARTITION:
5460      case BLKPG_DEL_PARTITION:
5461          /* payload is struct blkpg_partition */
5462          break;
5463      default:
5464          /* Unknown opcode */
5465          ret = -TARGET_EINVAL;
5466          goto out;
5467      }
5468  
5469      /* Read and convert blkpg->data */
5470      arg = (abi_long)(uintptr_t)host_blkpg->data;
5471      target_size = thunk_type_size(part_arg_type, 0);
5472      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5473      if (!argptr) {
5474          ret = -TARGET_EFAULT;
5475          goto out;
5476      }
5477      thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5478      unlock_user(argptr, arg, 0);
5479  
5480      /* Swizzle the data pointer to our local copy and call! */
5481      host_blkpg->data = &host_part;
5482      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5483  
5484  out:
5485      return ret;
5486  }
5487  
5488  static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5489                                  int fd, int cmd, abi_long arg)
5490  {
5491      const argtype *arg_type = ie->arg_type;
5492      const StructEntry *se;
5493      const argtype *field_types;
5494      const int *dst_offsets, *src_offsets;
5495      int target_size;
5496      void *argptr;
5497      abi_ulong *target_rt_dev_ptr = NULL;
5498      unsigned long *host_rt_dev_ptr = NULL;
5499      abi_long ret;
5500      int i;
5501  
5502      assert(ie->access == IOC_W);
5503      assert(*arg_type == TYPE_PTR);
5504      arg_type++;
5505      assert(*arg_type == TYPE_STRUCT);
5506      target_size = thunk_type_size(arg_type, 0);
5507      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5508      if (!argptr) {
5509          return -TARGET_EFAULT;
5510      }
5511      arg_type++;
5512      assert(*arg_type == (int)STRUCT_rtentry);
5513      se = struct_entries + *arg_type++;
5514      assert(se->convert[0] == NULL);
5515      /* convert struct here to be able to catch rt_dev string */
5516      field_types = se->field_types;
5517      dst_offsets = se->field_offsets[THUNK_HOST];
5518      src_offsets = se->field_offsets[THUNK_TARGET];
5519      for (i = 0; i < se->nb_fields; i++) {
5520          if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5521              assert(*field_types == TYPE_PTRVOID);
5522              target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5523              host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5524              if (*target_rt_dev_ptr != 0) {
5525                  *host_rt_dev_ptr = (unsigned long)lock_user_string(
5526                                                    tswapal(*target_rt_dev_ptr));
5527                  if (!*host_rt_dev_ptr) {
5528                      unlock_user(argptr, arg, 0);
5529                      return -TARGET_EFAULT;
5530                  }
5531              } else {
5532                  *host_rt_dev_ptr = 0;
5533              }
5534              field_types++;
5535              continue;
5536          }
5537          field_types = thunk_convert(buf_temp + dst_offsets[i],
5538                                      argptr + src_offsets[i],
5539                                      field_types, THUNK_HOST);
5540      }
5541      unlock_user(argptr, arg, 0);
5542  
5543      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5544  
5545      assert(host_rt_dev_ptr != NULL);
5546      assert(target_rt_dev_ptr != NULL);
5547      if (*host_rt_dev_ptr != 0) {
5548          unlock_user((void *)*host_rt_dev_ptr,
5549                      *target_rt_dev_ptr, 0);
5550      }
5551      return ret;
5552  }
5553  
5554  static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5555                                       int fd, int cmd, abi_long arg)
5556  {
5557      int sig = target_to_host_signal(arg);
5558      return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5559  }
5560  
5561  static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5562                                      int fd, int cmd, abi_long arg)
5563  {
5564      struct timeval tv;
5565      abi_long ret;
5566  
5567      ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5568      if (is_error(ret)) {
5569          return ret;
5570      }
5571  
5572      if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5573          if (copy_to_user_timeval(arg, &tv)) {
5574              return -TARGET_EFAULT;
5575          }
5576      } else {
5577          if (copy_to_user_timeval64(arg, &tv)) {
5578              return -TARGET_EFAULT;
5579          }
5580      }
5581  
5582      return ret;
5583  }
5584  
5585  static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5586                                        int fd, int cmd, abi_long arg)
5587  {
5588      struct timespec ts;
5589      abi_long ret;
5590  
5591      ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5592      if (is_error(ret)) {
5593          return ret;
5594      }
5595  
5596      if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5597          if (host_to_target_timespec(arg, &ts)) {
5598              return -TARGET_EFAULT;
5599          }
5600      } else {
5601          if (host_to_target_timespec64(arg, &ts)) {
5602              return -TARGET_EFAULT;
5603          }
5604      }
5605  
5606      return ret;
5607  }
5608  
5609  #ifdef TIOCGPTPEER
5610  static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5611                                       int fd, int cmd, abi_long arg)
5612  {
5613      int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5614      return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5615  }
5616  #endif
5617  
5618  #ifdef HAVE_DRM_H
5619  
5620  static void unlock_drm_version(struct drm_version *host_ver,
5621                                 struct target_drm_version *target_ver,
5622                                 bool copy)
5623  {
5624      unlock_user(host_ver->name, target_ver->name,
5625                                  copy ? host_ver->name_len : 0);
5626      unlock_user(host_ver->date, target_ver->date,
5627                                  copy ? host_ver->date_len : 0);
5628      unlock_user(host_ver->desc, target_ver->desc,
5629                                  copy ? host_ver->desc_len : 0);
5630  }
5631  
5632  static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5633                                            struct target_drm_version *target_ver)
5634  {
5635      memset(host_ver, 0, sizeof(*host_ver));
5636  
5637      __get_user(host_ver->name_len, &target_ver->name_len);
5638      if (host_ver->name_len) {
5639          host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5640                                     target_ver->name_len, 0);
5641          if (!host_ver->name) {
5642              return -EFAULT;
5643          }
5644      }
5645  
5646      __get_user(host_ver->date_len, &target_ver->date_len);
5647      if (host_ver->date_len) {
5648          host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5649                                     target_ver->date_len, 0);
5650          if (!host_ver->date) {
5651              goto err;
5652          }
5653      }
5654  
5655      __get_user(host_ver->desc_len, &target_ver->desc_len);
5656      if (host_ver->desc_len) {
5657          host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5658                                     target_ver->desc_len, 0);
5659          if (!host_ver->desc) {
5660              goto err;
5661          }
5662      }
5663  
5664      return 0;
5665  err:
5666      unlock_drm_version(host_ver, target_ver, false);
5667      return -EFAULT;
5668  }
5669  
5670  static inline void host_to_target_drmversion(
5671                                            struct target_drm_version *target_ver,
5672                                            struct drm_version *host_ver)
5673  {
5674      __put_user(host_ver->version_major, &target_ver->version_major);
5675      __put_user(host_ver->version_minor, &target_ver->version_minor);
5676      __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5677      __put_user(host_ver->name_len, &target_ver->name_len);
5678      __put_user(host_ver->date_len, &target_ver->date_len);
5679      __put_user(host_ver->desc_len, &target_ver->desc_len);
5680      unlock_drm_version(host_ver, target_ver, true);
5681  }
5682  
5683  static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5684                               int fd, int cmd, abi_long arg)
5685  {
5686      struct drm_version *ver;
5687      struct target_drm_version *target_ver;
5688      abi_long ret;
5689  
5690      switch (ie->host_cmd) {
5691      case DRM_IOCTL_VERSION:
5692          if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5693              return -TARGET_EFAULT;
5694          }
5695          ver = (struct drm_version *)buf_temp;
5696          ret = target_to_host_drmversion(ver, target_ver);
5697          if (!is_error(ret)) {
5698              ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5699              if (is_error(ret)) {
5700                  unlock_drm_version(ver, target_ver, false);
5701              } else {
5702                  host_to_target_drmversion(target_ver, ver);
5703              }
5704          }
5705          unlock_user_struct(target_ver, arg, 0);
5706          return ret;
5707      }
5708      return -TARGET_ENOSYS;
5709  }
5710  
5711  static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5712                                             struct drm_i915_getparam *gparam,
5713                                             int fd, abi_long arg)
5714  {
5715      abi_long ret;
5716      int value;
5717      struct target_drm_i915_getparam *target_gparam;
5718  
5719      if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5720          return -TARGET_EFAULT;
5721      }
5722  
5723      __get_user(gparam->param, &target_gparam->param);
5724      gparam->value = &value;
5725      ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5726      put_user_s32(value, target_gparam->value);
5727  
5728      unlock_user_struct(target_gparam, arg, 0);
5729      return ret;
5730  }
5731  
5732  static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5733                                    int fd, int cmd, abi_long arg)
5734  {
5735      switch (ie->host_cmd) {
5736      case DRM_IOCTL_I915_GETPARAM:
5737          return do_ioctl_drm_i915_getparam(ie,
5738                                            (struct drm_i915_getparam *)buf_temp,
5739                                            fd, arg);
5740      default:
5741          return -TARGET_ENOSYS;
5742      }
5743  }
5744  
5745  #endif
5746  
5747  static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5748                                          int fd, int cmd, abi_long arg)
5749  {
5750      struct tun_filter *filter = (struct tun_filter *)buf_temp;
5751      struct tun_filter *target_filter;
5752      char *target_addr;
5753  
5754      assert(ie->access == IOC_W);
5755  
5756      target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5757      if (!target_filter) {
5758          return -TARGET_EFAULT;
5759      }
5760      filter->flags = tswap16(target_filter->flags);
5761      filter->count = tswap16(target_filter->count);
5762      unlock_user(target_filter, arg, 0);
5763  
5764      if (filter->count) {
5765          if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5766              MAX_STRUCT_SIZE) {
5767              return -TARGET_EFAULT;
5768          }
5769  
5770          target_addr = lock_user(VERIFY_READ,
5771                                  arg + offsetof(struct tun_filter, addr),
5772                                  filter->count * ETH_ALEN, 1);
5773          if (!target_addr) {
5774              return -TARGET_EFAULT;
5775          }
5776          memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5777          unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5778      }
5779  
5780      return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5781  }
5782  
5783  IOCTLEntry ioctl_entries[] = {
5784  #define IOCTL(cmd, access, ...) \
5785      { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5786  #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5787      { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5788  #define IOCTL_IGNORE(cmd) \
5789      { TARGET_ ## cmd, 0, #cmd },
5790  #include "ioctls.h"
5791      { 0, 0, },
5792  };
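
      /*
       * Hedged illustration of how the table is populated (the TIOCGWINSZ
       * entry is shown as an example and may not match ioctls.h exactly):
       * an entry such as
       *
       *   IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
       *
       * expands to
       *
       *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
       *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
       *
       * which lets do_ioctl() below look up the target command number, convert
       * the pointed-to structure according to its argtype list, and issue the
       * host ioctl.
       */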
5793  
5794  /* ??? Implement proper locking for ioctls.  */
5795  /* do_ioctl() must return target values and target errnos. */
5796  static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5797  {
5798      const IOCTLEntry *ie;
5799      const argtype *arg_type;
5800      abi_long ret;
5801      uint8_t buf_temp[MAX_STRUCT_SIZE];
5802      int target_size;
5803      void *argptr;
5804  
5805      ie = ioctl_entries;
5806      for(;;) {
5807          if (ie->target_cmd == 0) {
5808              qemu_log_mask(
5809                  LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5810              return -TARGET_ENOSYS;
5811          }
5812          if (ie->target_cmd == cmd)
5813              break;
5814          ie++;
5815      }
5816      arg_type = ie->arg_type;
5817      if (ie->do_ioctl) {
5818          return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5819      } else if (!ie->host_cmd) {
5820          /* Some architectures define BSD ioctls in their headers
5821             that are not implemented in Linux.  */
5822          return -TARGET_ENOSYS;
5823      }
5824  
5825      switch(arg_type[0]) {
5826      case TYPE_NULL:
5827          /* no argument */
5828          ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5829          break;
5830      case TYPE_PTRVOID:
5831      case TYPE_INT:
5832      case TYPE_LONG:
5833      case TYPE_ULONG:
5834          ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5835          break;
5836      case TYPE_PTR:
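          /*
           * Pointer arguments are converted through buf_temp using the argtype
           * description that follows TYPE_PTR: IOC_W converts target->host
           * before the call, IOC_R converts host->target afterwards, and
           * IOC_RW does both.
           */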
5837          arg_type++;
5838          target_size = thunk_type_size(arg_type, 0);
5839          switch(ie->access) {
5840          case IOC_R:
5841              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5842              if (!is_error(ret)) {
5843                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5844                  if (!argptr)
5845                      return -TARGET_EFAULT;
5846                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5847                  unlock_user(argptr, arg, target_size);
5848              }
5849              break;
5850          case IOC_W:
5851              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5852              if (!argptr)
5853                  return -TARGET_EFAULT;
5854              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5855              unlock_user(argptr, arg, 0);
5856              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5857              break;
5858          default:
5859          case IOC_RW:
5860              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5861              if (!argptr)
5862                  return -TARGET_EFAULT;
5863              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5864              unlock_user(argptr, arg, 0);
5865              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5866              if (!is_error(ret)) {
5867                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5868                  if (!argptr)
5869                      return -TARGET_EFAULT;
5870                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5871                  unlock_user(argptr, arg, target_size);
5872              }
5873              break;
5874          }
5875          break;
5876      default:
5877          qemu_log_mask(LOG_UNIMP,
5878                        "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5879                        (long)cmd, arg_type[0]);
5880          ret = -TARGET_ENOSYS;
5881          break;
5882      }
5883      return ret;
5884  }
5885  
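      /*
       * Each row of these bitmask_transtbl tables pairs a target mask/value
       * with the corresponding host mask/value; target_to_host_bitmask() and
       * host_to_target_bitmask() use them to translate the termios flag words
       * in either direction.  Single-bit flags map one-to-one, while multi-bit
       * fields such as NLDLY or CRDLY in oflag_tbl need one row per value.
       */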
5886  static const bitmask_transtbl iflag_tbl[] = {
5887          { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5888          { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5889          { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5890          { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5891          { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5892          { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5893          { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5894          { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5895          { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5896          { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5897          { TARGET_IXON, TARGET_IXON, IXON, IXON },
5898          { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5899          { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5900          { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5901          { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5902          { 0, 0, 0, 0 }
5903  };
5904  
5905  static const bitmask_transtbl oflag_tbl[] = {
5906  	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5907  	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5908  	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5909  	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5910  	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5911  	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5912  	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5913  	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5914  	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5915  	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5916  	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5917  	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5918  	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5919  	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5920  	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5921  	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5922  	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5923  	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5924  	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5925  	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5926  	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5927  	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5928  	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5929  	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5930  	{ 0, 0, 0, 0 }
5931  };
5932  
5933  static const bitmask_transtbl cflag_tbl[] = {
5934  	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5935  	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5936  	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5937  	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5938  	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5939  	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5940  	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5941  	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5942  	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5943  	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5944  	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5945  	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5946  	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5947  	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5948  	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5949  	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5950  	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5951  	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5952  	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5953  	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5954  	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5955  	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5956  	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5957  	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5958  	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5959  	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5960  	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5961  	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5962  	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5963  	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5964  	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5965  	{ 0, 0, 0, 0 }
5966  };
5967  
5968  static const bitmask_transtbl lflag_tbl[] = {
5969    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5970    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5971    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5972    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5973    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5974    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5975    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5976    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5977    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5978    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5979    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5980    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5981    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5982    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5983    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5984    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5985    { 0, 0, 0, 0 }
5986  };
5987  
5988  static void target_to_host_termios (void *dst, const void *src)
5989  {
5990      struct host_termios *host = dst;
5991      const struct target_termios *target = src;
5992  
5993      host->c_iflag =
5994          target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5995      host->c_oflag =
5996          target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5997      host->c_cflag =
5998          target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5999      host->c_lflag =
6000          target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6001      host->c_line = target->c_line;
6002  
6003      memset(host->c_cc, 0, sizeof(host->c_cc));
6004      host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6005      host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6006      host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6007      host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6008      host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6009      host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6010      host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6011      host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6012      host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6013      host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6014      host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6015      host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6016      host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6017      host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6018      host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6019      host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6020      host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6021  }
6022  
6023  static void host_to_target_termios (void *dst, const void *src)
6024  {
6025      struct target_termios *target = dst;
6026      const struct host_termios *host = src;
6027  
6028      target->c_iflag =
6029          tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6030      target->c_oflag =
6031          tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6032      target->c_cflag =
6033          tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6034      target->c_lflag =
6035          tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6036      target->c_line = host->c_line;
6037  
6038      memset(target->c_cc, 0, sizeof(target->c_cc));
6039      target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6040      target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6041      target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6042      target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6043      target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6044      target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6045      target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6046      target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6047      target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6048      target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6049      target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6050      target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6051      target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6052      target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6053      target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6054      target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6055      target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6056  }
6057  
6058  static const StructEntry struct_termios_def = {
6059      .convert = { host_to_target_termios, target_to_host_termios },
6060      .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6061      .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6062      .print = print_termios,
6063  };
6064  
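      /*
       * Each bitmask_transtbl entry below is { target_mask, target_bits,
       * host_mask, host_bits }: when the bits selected by target_mask match
       * target_bits, the corresponding host_bits are set (and symmetrically
       * for the host-to-target direction); see target_to_host_bitmask().
       */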
6065  static const bitmask_transtbl mmap_flags_tbl[] = {
6066      { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6067      { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6068      { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6069      { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6070        MAP_ANONYMOUS, MAP_ANONYMOUS },
6071      { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6072        MAP_GROWSDOWN, MAP_GROWSDOWN },
6073      { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6074        MAP_DENYWRITE, MAP_DENYWRITE },
6075      { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6076        MAP_EXECUTABLE, MAP_EXECUTABLE },
6077      { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6078      { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6079        MAP_NORESERVE, MAP_NORESERVE },
6080      { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6081      /* MAP_STACK had been ignored by the kernel for quite some time.
6082         Recognize it for the target insofar as we do not want to pass
6083         it through to the host.  */
6084      { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6085      { 0, 0, 0, 0 }
6086  };
6087  
6088  /*
6089   * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6090   *       TARGET_I386 is defined if TARGET_X86_64 is defined
6091   */
6092  #if defined(TARGET_I386)
6093  
6094  /* NOTE: there is really only one LDT, shared by all the threads */
6095  static uint8_t *ldt_table;
6096  
6097  static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6098  {
6099      int size;
6100      void *p;
6101  
6102      if (!ldt_table)
6103          return 0;
6104      size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6105      if (size > bytecount)
6106          size = bytecount;
6107      p = lock_user(VERIFY_WRITE, ptr, size, 0);
6108      if (!p)
6109          return -TARGET_EFAULT;
6110      /* ??? Should this be byteswapped?  */
6111      memcpy(p, ldt_table, size);
6112      unlock_user(p, ptr, size);
6113      return size;
6114  }
6115  
6116  /* XXX: add locking support */
6117  static abi_long write_ldt(CPUX86State *env,
6118                            abi_ulong ptr, unsigned long bytecount, int oldmode)
6119  {
6120      struct target_modify_ldt_ldt_s ldt_info;
6121      struct target_modify_ldt_ldt_s *target_ldt_info;
6122      int seg_32bit, contents, read_exec_only, limit_in_pages;
6123      int seg_not_present, useable, lm;
6124      uint32_t *lp, entry_1, entry_2;
6125  
6126      if (bytecount != sizeof(ldt_info))
6127          return -TARGET_EINVAL;
6128      if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6129          return -TARGET_EFAULT;
6130      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6131      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6132      ldt_info.limit = tswap32(target_ldt_info->limit);
6133      ldt_info.flags = tswap32(target_ldt_info->flags);
6134      unlock_user_struct(target_ldt_info, ptr, 0);
6135  
6136      if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6137          return -TARGET_EINVAL;
6138      seg_32bit = ldt_info.flags & 1;
6139      contents = (ldt_info.flags >> 1) & 3;
6140      read_exec_only = (ldt_info.flags >> 3) & 1;
6141      limit_in_pages = (ldt_info.flags >> 4) & 1;
6142      seg_not_present = (ldt_info.flags >> 5) & 1;
6143      useable = (ldt_info.flags >> 6) & 1;
6144  #ifdef TARGET_ABI32
6145      lm = 0;
6146  #else
6147      lm = (ldt_info.flags >> 7) & 1;
6148  #endif
6149      if (contents == 3) {
6150          if (oldmode)
6151              return -TARGET_EINVAL;
6152          if (seg_not_present == 0)
6153              return -TARGET_EINVAL;
6154      }
6155      /* allocate the LDT */
6156      if (!ldt_table) {
6157          env->ldt.base = target_mmap(0,
6158                                      TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6159                                      PROT_READ|PROT_WRITE,
6160                                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6161          if (env->ldt.base == -1)
6162              return -TARGET_ENOMEM;
6163          memset(g2h_untagged(env->ldt.base), 0,
6164                 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6165          env->ldt.limit = 0xffff;
6166          ldt_table = g2h_untagged(env->ldt.base);
6167      }
6168  
6169      /* NOTE: same code as Linux kernel */
6170      /* Allow LDTs to be cleared by the user. */
6171      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6172          if (oldmode ||
6173              (contents == 0		&&
6174               read_exec_only == 1	&&
6175               seg_32bit == 0		&&
6176               limit_in_pages == 0	&&
6177               seg_not_present == 1	&&
6178               useable == 0 )) {
6179              entry_1 = 0;
6180              entry_2 = 0;
6181              goto install;
6182          }
6183      }
6184  
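          /*
           * Pack the descriptor: entry_1 holds base[15:0] in its upper half and
           * limit[15:0] in its lower half; entry_2 holds base[31:24], base[23:16],
           * limit[19:16], the access/type bits and the flag bits.  The constant
           * 0x7000 sets the S bit and DPL=3 (user).
           */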
6185      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6186          (ldt_info.limit & 0x0ffff);
6187      entry_2 = (ldt_info.base_addr & 0xff000000) |
6188          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6189          (ldt_info.limit & 0xf0000) |
6190          ((read_exec_only ^ 1) << 9) |
6191          (contents << 10) |
6192          ((seg_not_present ^ 1) << 15) |
6193          (seg_32bit << 22) |
6194          (limit_in_pages << 23) |
6195          (lm << 21) |
6196          0x7000;
6197      if (!oldmode)
6198          entry_2 |= (useable << 20);
6199  
6200      /* Install the new entry ...  */
6201  install:
6202      lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6203      lp[0] = tswap32(entry_1);
6204      lp[1] = tswap32(entry_2);
6205      return 0;
6206  }
6207  
6208  /* specific and weird i386 syscalls */
6209  static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6210                                unsigned long bytecount)
6211  {
6212      abi_long ret;
6213  
6214      switch (func) {
6215      case 0:
6216          ret = read_ldt(ptr, bytecount);
6217          break;
6218      case 1:
6219          ret = write_ldt(env, ptr, bytecount, 1);
6220          break;
6221      case 0x11:
6222          ret = write_ldt(env, ptr, bytecount, 0);
6223          break;
6224      default:
6225          ret = -TARGET_ENOSYS;
6226          break;
6227      }
6228      return ret;
6229  }
6230  
6231  #if defined(TARGET_ABI32)
6232  abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6233  {
6234      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6235      struct target_modify_ldt_ldt_s ldt_info;
6236      struct target_modify_ldt_ldt_s *target_ldt_info;
6237      int seg_32bit, contents, read_exec_only, limit_in_pages;
6238      int seg_not_present, useable, lm;
6239      uint32_t *lp, entry_1, entry_2;
6240      int i;
6241  
6242      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6243      if (!target_ldt_info)
6244          return -TARGET_EFAULT;
6245      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6246      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6247      ldt_info.limit = tswap32(target_ldt_info->limit);
6248      ldt_info.flags = tswap32(target_ldt_info->flags);
6249      if (ldt_info.entry_number == -1) {
6250          for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6251              if (gdt_table[i] == 0) {
6252                  ldt_info.entry_number = i;
6253                  target_ldt_info->entry_number = tswap32(i);
6254                  break;
6255              }
6256          }
6257      }
6258      unlock_user_struct(target_ldt_info, ptr, 1);
6259  
6260      if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6261          ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6262             return -TARGET_EINVAL;
6263      seg_32bit = ldt_info.flags & 1;
6264      contents = (ldt_info.flags >> 1) & 3;
6265      read_exec_only = (ldt_info.flags >> 3) & 1;
6266      limit_in_pages = (ldt_info.flags >> 4) & 1;
6267      seg_not_present = (ldt_info.flags >> 5) & 1;
6268      useable = (ldt_info.flags >> 6) & 1;
6269  #ifdef TARGET_ABI32
6270      lm = 0;
6271  #else
6272      lm = (ldt_info.flags >> 7) & 1;
6273  #endif
6274  
6275      if (contents == 3) {
6276          if (seg_not_present == 0)
6277              return -TARGET_EINVAL;
6278      }
6279  
6280      /* NOTE: same code as Linux kernel */
6281      /* Allow LDTs to be cleared by the user. */
6282      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6283          if ((contents == 0             &&
6284               read_exec_only == 1       &&
6285               seg_32bit == 0            &&
6286               limit_in_pages == 0       &&
6287               seg_not_present == 1      &&
6288               useable == 0 )) {
6289              entry_1 = 0;
6290              entry_2 = 0;
6291              goto install;
6292          }
6293      }
6294  
6295      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6296          (ldt_info.limit & 0x0ffff);
6297      entry_2 = (ldt_info.base_addr & 0xff000000) |
6298          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6299          (ldt_info.limit & 0xf0000) |
6300          ((read_exec_only ^ 1) << 9) |
6301          (contents << 10) |
6302          ((seg_not_present ^ 1) << 15) |
6303          (seg_32bit << 22) |
6304          (limit_in_pages << 23) |
6305          (useable << 20) |
6306          (lm << 21) |
6307          0x7000;
6308  
6309      /* Install the new entry ...  */
6310  install:
6311      lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6312      lp[0] = tswap32(entry_1);
6313      lp[1] = tswap32(entry_2);
6314      return 0;
6315  }
6316  
6317  static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6318  {
6319      struct target_modify_ldt_ldt_s *target_ldt_info;
6320      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6321      uint32_t base_addr, limit, flags;
6322      int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6323      int seg_not_present, useable, lm;
6324      uint32_t *lp, entry_1, entry_2;
6325  
6326      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6327      if (!target_ldt_info)
6328          return -TARGET_EFAULT;
6329      idx = tswap32(target_ldt_info->entry_number);
6330      if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6331          idx > TARGET_GDT_ENTRY_TLS_MAX) {
6332          unlock_user_struct(target_ldt_info, ptr, 1);
6333          return -TARGET_EINVAL;
6334      }
6335      lp = (uint32_t *)(gdt_table + idx);
6336      entry_1 = tswap32(lp[0]);
6337      entry_2 = tswap32(lp[1]);
6338  
6339      read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6340      contents = (entry_2 >> 10) & 3;
6341      seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6342      seg_32bit = (entry_2 >> 22) & 1;
6343      limit_in_pages = (entry_2 >> 23) & 1;
6344      useable = (entry_2 >> 20) & 1;
6345  #ifdef TARGET_ABI32
6346      lm = 0;
6347  #else
6348      lm = (entry_2 >> 21) & 1;
6349  #endif
6350      flags = (seg_32bit << 0) | (contents << 1) |
6351          (read_exec_only << 3) | (limit_in_pages << 4) |
6352          (seg_not_present << 5) | (useable << 6) | (lm << 7);
6353      limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6354      base_addr = (entry_1 >> 16) |
6355          (entry_2 & 0xff000000) |
6356          ((entry_2 & 0xff) << 16);
6357      target_ldt_info->base_addr = tswapal(base_addr);
6358      target_ldt_info->limit = tswap32(limit);
6359      target_ldt_info->flags = tswap32(flags);
6360      unlock_user_struct(target_ldt_info, ptr, 1);
6361      return 0;
6362  }
6363  
6364  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6365  {
6366      return -TARGET_ENOSYS;
6367  }
6368  #else
6369  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6370  {
6371      abi_long ret = 0;
6372      abi_ulong val;
6373      int idx;
6374  
6375      switch(code) {
6376      case TARGET_ARCH_SET_GS:
6377      case TARGET_ARCH_SET_FS:
6378          if (code == TARGET_ARCH_SET_GS)
6379              idx = R_GS;
6380          else
6381              idx = R_FS;
6382          cpu_x86_load_seg(env, idx, 0);
6383          env->segs[idx].base = addr;
6384          break;
6385      case TARGET_ARCH_GET_GS:
6386      case TARGET_ARCH_GET_FS:
6387          if (code == TARGET_ARCH_GET_GS)
6388              idx = R_GS;
6389          else
6390              idx = R_FS;
6391          val = env->segs[idx].base;
6392          if (put_user(val, addr, abi_ulong))
6393              ret = -TARGET_EFAULT;
6394          break;
6395      default:
6396          ret = -TARGET_EINVAL;
6397          break;
6398      }
6399      return ret;
6400  }
6401  #endif /* defined(TARGET_ABI32) */
6402  
6403  #endif /* defined(TARGET_I386) */
6404  
6405  #define NEW_STACK_SIZE 0x40000
6406  
6407  
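      /*
       * Serializes thread creation: do_fork() holds this lock while it sets up
       * the new CPU state, and clone_func() briefly takes it so the child does
       * not start running guest code before that setup is complete.
       */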
6408  static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6409  typedef struct {
6410      CPUArchState *env;
6411      pthread_mutex_t mutex;
6412      pthread_cond_t cond;
6413      pthread_t thread;
6414      uint32_t tid;
6415      abi_ulong child_tidptr;
6416      abi_ulong parent_tidptr;
6417      sigset_t sigmask;
6418  } new_thread_info;
6419  
6420  static void *clone_func(void *arg)
6421  {
6422      new_thread_info *info = arg;
6423      CPUArchState *env;
6424      CPUState *cpu;
6425      TaskState *ts;
6426  
6427      rcu_register_thread();
6428      tcg_register_thread();
6429      env = info->env;
6430      cpu = env_cpu(env);
6431      thread_cpu = cpu;
6432      ts = (TaskState *)cpu->opaque;
6433      info->tid = sys_gettid();
6434      task_settid(ts);
6435      if (info->child_tidptr)
6436          put_user_u32(info->tid, info->child_tidptr);
6437      if (info->parent_tidptr)
6438          put_user_u32(info->tid, info->parent_tidptr);
6439      qemu_guest_random_seed_thread_part2(cpu->random_seed);
6440      /* Enable signals.  */
6441      sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6442      /* Signal to the parent that we're ready.  */
6443      pthread_mutex_lock(&info->mutex);
6444      pthread_cond_broadcast(&info->cond);
6445      pthread_mutex_unlock(&info->mutex);
6446      /* Wait until the parent has finished initializing the tls state.  */
6447      pthread_mutex_lock(&clone_lock);
6448      pthread_mutex_unlock(&clone_lock);
6449      cpu_loop(env);
6450      /* never exits */
6451      return NULL;
6452  }
6453  
6454  /* do_fork() must return host values and target errnos (unlike most
6455     do_*() functions). */
6456  static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6457                     abi_ulong parent_tidptr, target_ulong newtls,
6458                     abi_ulong child_tidptr)
6459  {
6460      CPUState *cpu = env_cpu(env);
6461      int ret;
6462      TaskState *ts;
6463      CPUState *new_cpu;
6464      CPUArchState *new_env;
6465      sigset_t sigmask;
6466  
6467      flags &= ~CLONE_IGNORED_FLAGS;
6468  
6469      /* Emulate vfork() with fork() */
6470      if (flags & CLONE_VFORK)
6471          flags &= ~(CLONE_VFORK | CLONE_VM);
6472  
6473      if (flags & CLONE_VM) {
6474          TaskState *parent_ts = (TaskState *)cpu->opaque;
6475          new_thread_info info;
6476          pthread_attr_t attr;
6477  
6478          if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6479              (flags & CLONE_INVALID_THREAD_FLAGS)) {
6480              return -TARGET_EINVAL;
6481          }
6482  
6483          ts = g_new0(TaskState, 1);
6484          init_task_state(ts);
6485  
6486          /* Grab a mutex so that thread setup appears atomic.  */
6487          pthread_mutex_lock(&clone_lock);
6488  
6489          /*
6490           * If this is our first additional thread, we need to ensure we
6491           * generate code for parallel execution and flush old translations.
6492           * Do this now so that the copy gets CF_PARALLEL too.
6493           */
6494          if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6495              cpu->tcg_cflags |= CF_PARALLEL;
6496              tb_flush(cpu);
6497          }
6498  
6499          /* we create a new CPU instance. */
6500          new_env = cpu_copy(env);
6501          /* Init regs that differ from the parent.  */
6502          cpu_clone_regs_child(new_env, newsp, flags);
6503          cpu_clone_regs_parent(env, flags);
6504          new_cpu = env_cpu(new_env);
6505          new_cpu->opaque = ts;
6506          ts->bprm = parent_ts->bprm;
6507          ts->info = parent_ts->info;
6508          ts->signal_mask = parent_ts->signal_mask;
6509  
6510          if (flags & CLONE_CHILD_CLEARTID) {
6511              ts->child_tidptr = child_tidptr;
6512          }
6513  
6514          if (flags & CLONE_SETTLS) {
6515              cpu_set_tls (new_env, newtls);
6516          }
6517  
6518          memset(&info, 0, sizeof(info));
6519          pthread_mutex_init(&info.mutex, NULL);
6520          pthread_mutex_lock(&info.mutex);
6521          pthread_cond_init(&info.cond, NULL);
6522          info.env = new_env;
6523          if (flags & CLONE_CHILD_SETTID) {
6524              info.child_tidptr = child_tidptr;
6525          }
6526          if (flags & CLONE_PARENT_SETTID) {
6527              info.parent_tidptr = parent_tidptr;
6528          }
6529  
6530          ret = pthread_attr_init(&attr);
6531          ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6532          ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6533          /* It is not safe to deliver signals until the child has finished
6534             initializing, so temporarily block all signals.  */
6535          sigfillset(&sigmask);
6536          sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6537          cpu->random_seed = qemu_guest_random_seed_thread_part1();
6538  
6539          ret = pthread_create(&info.thread, &attr, clone_func, &info);
6540          /* TODO: Free new CPU state if thread creation failed.  */
6541  
6542          sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6543          pthread_attr_destroy(&attr);
6544          if (ret == 0) {
6545              /* Wait for the child to initialize.  */
6546              pthread_cond_wait(&info.cond, &info.mutex);
6547              ret = info.tid;
6548          } else {
6549              ret = -1;
6550          }
6551          pthread_mutex_unlock(&info.mutex);
6552          pthread_cond_destroy(&info.cond);
6553          pthread_mutex_destroy(&info.mutex);
6554          pthread_mutex_unlock(&clone_lock);
6555      } else {
6556          /* if CLONE_VM is not set, we consider it a fork */
6557          if (flags & CLONE_INVALID_FORK_FLAGS) {
6558              return -TARGET_EINVAL;
6559          }
6560  
6561          /* We can't support custom termination signals */
6562          if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6563              return -TARGET_EINVAL;
6564          }
6565  
6566          if (block_signals()) {
6567              return -TARGET_ERESTARTSYS;
6568          }
6569  
6570          fork_start();
6571          ret = fork();
6572          if (ret == 0) {
6573              /* Child Process.  */
6574              cpu_clone_regs_child(env, newsp, flags);
6575              fork_end(1);
6576              /* There is a race condition here.  The parent process could
6577                 theoretically read the TID in the child process before the child
6578                 tid is set.  This would require using either ptrace
6579                 (not implemented) or having *_tidptr point at a shared memory
6580                 mapping.  We can't repeat the spinlock hack used above because
6581                 the child process gets its own copy of the lock.  */
6582              if (flags & CLONE_CHILD_SETTID)
6583                  put_user_u32(sys_gettid(), child_tidptr);
6584              if (flags & CLONE_PARENT_SETTID)
6585                  put_user_u32(sys_gettid(), parent_tidptr);
6586              ts = (TaskState *)cpu->opaque;
6587              if (flags & CLONE_SETTLS)
6588                  cpu_set_tls (env, newtls);
6589              if (flags & CLONE_CHILD_CLEARTID)
6590                  ts->child_tidptr = child_tidptr;
6591          } else {
6592              cpu_clone_regs_parent(env, flags);
6593              fork_end(0);
6594          }
6595      }
6596      return ret;
6597  }
6598  
6599  /* warning: doesn't handle Linux-specific flags... */
6600  static int target_to_host_fcntl_cmd(int cmd)
6601  {
6602      int ret;
6603  
6604      switch(cmd) {
6605      case TARGET_F_DUPFD:
6606      case TARGET_F_GETFD:
6607      case TARGET_F_SETFD:
6608      case TARGET_F_GETFL:
6609      case TARGET_F_SETFL:
6610      case TARGET_F_OFD_GETLK:
6611      case TARGET_F_OFD_SETLK:
6612      case TARGET_F_OFD_SETLKW:
6613          ret = cmd;
6614          break;
6615      case TARGET_F_GETLK:
6616          ret = F_GETLK64;
6617          break;
6618      case TARGET_F_SETLK:
6619          ret = F_SETLK64;
6620          break;
6621      case TARGET_F_SETLKW:
6622          ret = F_SETLKW64;
6623          break;
6624      case TARGET_F_GETOWN:
6625          ret = F_GETOWN;
6626          break;
6627      case TARGET_F_SETOWN:
6628          ret = F_SETOWN;
6629          break;
6630      case TARGET_F_GETSIG:
6631          ret = F_GETSIG;
6632          break;
6633      case TARGET_F_SETSIG:
6634          ret = F_SETSIG;
6635          break;
6636  #if TARGET_ABI_BITS == 32
6637      case TARGET_F_GETLK64:
6638          ret = F_GETLK64;
6639          break;
6640      case TARGET_F_SETLK64:
6641          ret = F_SETLK64;
6642          break;
6643      case TARGET_F_SETLKW64:
6644          ret = F_SETLKW64;
6645          break;
6646  #endif
6647      case TARGET_F_SETLEASE:
6648          ret = F_SETLEASE;
6649          break;
6650      case TARGET_F_GETLEASE:
6651          ret = F_GETLEASE;
6652          break;
6653  #ifdef F_DUPFD_CLOEXEC
6654      case TARGET_F_DUPFD_CLOEXEC:
6655          ret = F_DUPFD_CLOEXEC;
6656          break;
6657  #endif
6658      case TARGET_F_NOTIFY:
6659          ret = F_NOTIFY;
6660          break;
6661  #ifdef F_GETOWN_EX
6662      case TARGET_F_GETOWN_EX:
6663          ret = F_GETOWN_EX;
6664          break;
6665  #endif
6666  #ifdef F_SETOWN_EX
6667      case TARGET_F_SETOWN_EX:
6668          ret = F_SETOWN_EX;
6669          break;
6670  #endif
6671  #ifdef F_SETPIPE_SZ
6672      case TARGET_F_SETPIPE_SZ:
6673          ret = F_SETPIPE_SZ;
6674          break;
6675      case TARGET_F_GETPIPE_SZ:
6676          ret = F_GETPIPE_SZ;
6677          break;
6678  #endif
6679  #ifdef F_ADD_SEALS
6680      case TARGET_F_ADD_SEALS:
6681          ret = F_ADD_SEALS;
6682          break;
6683      case TARGET_F_GET_SEALS:
6684          ret = F_GET_SEALS;
6685          break;
6686  #endif
6687      default:
6688          ret = -TARGET_EINVAL;
6689          break;
6690      }
6691  
6692  #if defined(__powerpc64__)
6693      /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6694      /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6695       * which are not supported by the kernel. The glibc fcntl call actually
6696       * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6697       * the syscall directly, adjust to what is supported by the kernel.
6698      if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6699          ret -= F_GETLK64 - 5;
6700      }
6701  #endif
6702  
6703      return ret;
6704  }
6705  
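      /*
       * FLOCK_TRANSTBL expands to a switch over the lock types; it is
       * instantiated twice below with different definitions of
       * TRANSTBL_CONVERT, once for each translation direction.
       */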
6706  #define FLOCK_TRANSTBL \
6707      switch (type) { \
6708      TRANSTBL_CONVERT(F_RDLCK); \
6709      TRANSTBL_CONVERT(F_WRLCK); \
6710      TRANSTBL_CONVERT(F_UNLCK); \
6711      }
6712  
6713  static int target_to_host_flock(int type)
6714  {
6715  #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6716      FLOCK_TRANSTBL
6717  #undef  TRANSTBL_CONVERT
6718      return -TARGET_EINVAL;
6719  }
6720  
6721  static int host_to_target_flock(int type)
6722  {
6723  #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6724      FLOCK_TRANSTBL
6725  #undef  TRANSTBL_CONVERT
6726      /* if we don't know how to convert the value coming
6727       * from the host, we copy it to the target field as-is
6728       */
6729      return type;
6730  }
6731  
6732  static inline abi_long copy_from_user_flock(struct flock64 *fl,
6733                                              abi_ulong target_flock_addr)
6734  {
6735      struct target_flock *target_fl;
6736      int l_type;
6737  
6738      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6739          return -TARGET_EFAULT;
6740      }
6741  
6742      __get_user(l_type, &target_fl->l_type);
6743      l_type = target_to_host_flock(l_type);
6744      if (l_type < 0) {
6745          return l_type;
6746      }
6747      fl->l_type = l_type;
6748      __get_user(fl->l_whence, &target_fl->l_whence);
6749      __get_user(fl->l_start, &target_fl->l_start);
6750      __get_user(fl->l_len, &target_fl->l_len);
6751      __get_user(fl->l_pid, &target_fl->l_pid);
6752      unlock_user_struct(target_fl, target_flock_addr, 0);
6753      return 0;
6754  }
6755  
6756  static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6757                                            const struct flock64 *fl)
6758  {
6759      struct target_flock *target_fl;
6760      short l_type;
6761  
6762      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6763          return -TARGET_EFAULT;
6764      }
6765  
6766      l_type = host_to_target_flock(fl->l_type);
6767      __put_user(l_type, &target_fl->l_type);
6768      __put_user(fl->l_whence, &target_fl->l_whence);
6769      __put_user(fl->l_start, &target_fl->l_start);
6770      __put_user(fl->l_len, &target_fl->l_len);
6771      __put_user(fl->l_pid, &target_fl->l_pid);
6772      unlock_user_struct(target_fl, target_flock_addr, 1);
6773      return 0;
6774  }
6775  
6776  typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6777  typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6778  
6779  #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6780  static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6781                                                     abi_ulong target_flock_addr)
6782  {
6783      struct target_oabi_flock64 *target_fl;
6784      int l_type;
6785  
6786      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6787          return -TARGET_EFAULT;
6788      }
6789  
6790      __get_user(l_type, &target_fl->l_type);
6791      l_type = target_to_host_flock(l_type);
6792      if (l_type < 0) {
6793          return l_type;
6794      }
6795      fl->l_type = l_type;
6796      __get_user(fl->l_whence, &target_fl->l_whence);
6797      __get_user(fl->l_start, &target_fl->l_start);
6798      __get_user(fl->l_len, &target_fl->l_len);
6799      __get_user(fl->l_pid, &target_fl->l_pid);
6800      unlock_user_struct(target_fl, target_flock_addr, 0);
6801      return 0;
6802  }
6803  
6804  static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6805                                                   const struct flock64 *fl)
6806  {
6807      struct target_oabi_flock64 *target_fl;
6808      short l_type;
6809  
6810      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6811          return -TARGET_EFAULT;
6812      }
6813  
6814      l_type = host_to_target_flock(fl->l_type);
6815      __put_user(l_type, &target_fl->l_type);
6816      __put_user(fl->l_whence, &target_fl->l_whence);
6817      __put_user(fl->l_start, &target_fl->l_start);
6818      __put_user(fl->l_len, &target_fl->l_len);
6819      __put_user(fl->l_pid, &target_fl->l_pid);
6820      unlock_user_struct(target_fl, target_flock_addr, 1);
6821      return 0;
6822  }
6823  #endif
6824  
6825  static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6826                                                abi_ulong target_flock_addr)
6827  {
6828      struct target_flock64 *target_fl;
6829      int l_type;
6830  
6831      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6832          return -TARGET_EFAULT;
6833      }
6834  
6835      __get_user(l_type, &target_fl->l_type);
6836      l_type = target_to_host_flock(l_type);
6837      if (l_type < 0) {
6838          return l_type;
6839      }
6840      fl->l_type = l_type;
6841      __get_user(fl->l_whence, &target_fl->l_whence);
6842      __get_user(fl->l_start, &target_fl->l_start);
6843      __get_user(fl->l_len, &target_fl->l_len);
6844      __get_user(fl->l_pid, &target_fl->l_pid);
6845      unlock_user_struct(target_fl, target_flock_addr, 0);
6846      return 0;
6847  }
6848  
6849  static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6850                                              const struct flock64 *fl)
6851  {
6852      struct target_flock64 *target_fl;
6853      short l_type;
6854  
6855      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6856          return -TARGET_EFAULT;
6857      }
6858  
6859      l_type = host_to_target_flock(fl->l_type);
6860      __put_user(l_type, &target_fl->l_type);
6861      __put_user(fl->l_whence, &target_fl->l_whence);
6862      __put_user(fl->l_start, &target_fl->l_start);
6863      __put_user(fl->l_len, &target_fl->l_len);
6864      __put_user(fl->l_pid, &target_fl->l_pid);
6865      unlock_user_struct(target_fl, target_flock_addr, 1);
6866      return 0;
6867  }
6868  
6869  static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6870  {
6871      struct flock64 fl64;
6872  #ifdef F_GETOWN_EX
6873      struct f_owner_ex fox;
6874      struct target_f_owner_ex *target_fox;
6875  #endif
6876      abi_long ret;
6877      int host_cmd = target_to_host_fcntl_cmd(cmd);
6878  
6879      if (host_cmd == -TARGET_EINVAL)
6880          return host_cmd;
6881  
6882      switch(cmd) {
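          /*
           * target_to_host_fcntl_cmd() maps the plain lock commands to their
           * 64-bit host equivalents, so struct flock64 is used for both the
           * TARGET_F_*LK and TARGET_F_*LK64 cases.
           */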
6883      case TARGET_F_GETLK:
6884          ret = copy_from_user_flock(&fl64, arg);
6885          if (ret) {
6886              return ret;
6887          }
6888          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6889          if (ret == 0) {
6890              ret = copy_to_user_flock(arg, &fl64);
6891          }
6892          break;
6893  
6894      case TARGET_F_SETLK:
6895      case TARGET_F_SETLKW:
6896          ret = copy_from_user_flock(&fl64, arg);
6897          if (ret) {
6898              return ret;
6899          }
6900          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6901          break;
6902  
6903      case TARGET_F_GETLK64:
6904      case TARGET_F_OFD_GETLK:
6905          ret = copy_from_user_flock64(&fl64, arg);
6906          if (ret) {
6907              return ret;
6908          }
6909          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6910          if (ret == 0) {
6911              ret = copy_to_user_flock64(arg, &fl64);
6912          }
6913          break;
6914      case TARGET_F_SETLK64:
6915      case TARGET_F_SETLKW64:
6916      case TARGET_F_OFD_SETLK:
6917      case TARGET_F_OFD_SETLKW:
6918          ret = copy_from_user_flock64(&fl64, arg);
6919          if (ret) {
6920              return ret;
6921          }
6922          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6923          break;
6924  
6925      case TARGET_F_GETFL:
6926          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6927          if (ret >= 0) {
6928              ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6929          }
6930          break;
6931  
6932      case TARGET_F_SETFL:
6933          ret = get_errno(safe_fcntl(fd, host_cmd,
6934                                     target_to_host_bitmask(arg,
6935                                                            fcntl_flags_tbl)));
6936          break;
6937  
6938  #ifdef F_GETOWN_EX
6939      case TARGET_F_GETOWN_EX:
6940          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6941          if (ret >= 0) {
6942              if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6943                  return -TARGET_EFAULT;
6944              target_fox->type = tswap32(fox.type);
6945              target_fox->pid = tswap32(fox.pid);
6946              unlock_user_struct(target_fox, arg, 1);
6947          }
6948          break;
6949  #endif
6950  
6951  #ifdef F_SETOWN_EX
6952      case TARGET_F_SETOWN_EX:
6953          if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6954              return -TARGET_EFAULT;
6955          fox.type = tswap32(target_fox->type);
6956          fox.pid = tswap32(target_fox->pid);
6957          unlock_user_struct(target_fox, arg, 0);
6958          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6959          break;
6960  #endif
6961  
6962      case TARGET_F_SETSIG:
6963          ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6964          break;
6965  
6966      case TARGET_F_GETSIG:
6967          ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6968          break;
6969  
6970      case TARGET_F_SETOWN:
6971      case TARGET_F_GETOWN:
6972      case TARGET_F_SETLEASE:
6973      case TARGET_F_GETLEASE:
6974      case TARGET_F_SETPIPE_SZ:
6975      case TARGET_F_GETPIPE_SZ:
6976      case TARGET_F_ADD_SEALS:
6977      case TARGET_F_GET_SEALS:
6978          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6979          break;
6980  
6981      default:
6982          ret = get_errno(safe_fcntl(fd, cmd, arg));
6983          break;
6984      }
6985      return ret;
6986  }
6987  
6988  #ifdef USE_UID16
6989  
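      /*
       * For the 16-bit UID ABI, IDs that do not fit in 16 bits are squashed to
       * 65534 (the kernel's overflowuid/overflowgid default), while a 16-bit
       * value of -1 must widen to -1 so that "leave unchanged" arguments to the
       * set*id() calls keep their meaning.
       */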
6990  static inline int high2lowuid(int uid)
6991  {
6992      if (uid > 65535)
6993          return 65534;
6994      else
6995          return uid;
6996  }
6997  
6998  static inline int high2lowgid(int gid)
6999  {
7000      if (gid > 65535)
7001          return 65534;
7002      else
7003          return gid;
7004  }
7005  
7006  static inline int low2highuid(int uid)
7007  {
7008      if ((int16_t)uid == -1)
7009          return -1;
7010      else
7011          return uid;
7012  }
7013  
7014  static inline int low2highgid(int gid)
7015  {
7016      if ((int16_t)gid == -1)
7017          return -1;
7018      else
7019          return gid;
7020  }
7021  static inline int tswapid(int id)
7022  {
7023      return tswap16(id);
7024  }
7025  
7026  #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7027  
7028  #else /* !USE_UID16 */
7029  static inline int high2lowuid(int uid)
7030  {
7031      return uid;
7032  }
7033  static inline int high2lowgid(int gid)
7034  {
7035      return gid;
7036  }
7037  static inline int low2highuid(int uid)
7038  {
7039      return uid;
7040  }
7041  static inline int low2highgid(int gid)
7042  {
7043      return gid;
7044  }
7045  static inline int tswapid(int id)
7046  {
7047      return tswap32(id);
7048  }
7049  
7050  #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7051  
7052  #endif /* USE_UID16 */
7053  
7054  /* We must do direct syscalls for setting UID/GID, because we want to
7055   * implement the Linux system call semantics of "change only for this thread",
7056   * not the libc/POSIX semantics of "change for all threads in process".
7057   * (See http://ewontfix.com/17/ for more details.)
7058   * We use the 32-bit version of the syscalls if present; if it is not
7059   * then either the host architecture supports 32-bit UIDs natively with
7060   * the standard syscall, or the 16-bit UID is the best we can do.
7061   */
7062  #ifdef __NR_setuid32
7063  #define __NR_sys_setuid __NR_setuid32
7064  #else
7065  #define __NR_sys_setuid __NR_setuid
7066  #endif
7067  #ifdef __NR_setgid32
7068  #define __NR_sys_setgid __NR_setgid32
7069  #else
7070  #define __NR_sys_setgid __NR_setgid
7071  #endif
7072  #ifdef __NR_setresuid32
7073  #define __NR_sys_setresuid __NR_setresuid32
7074  #else
7075  #define __NR_sys_setresuid __NR_setresuid
7076  #endif
7077  #ifdef __NR_setresgid32
7078  #define __NR_sys_setresgid __NR_setresgid32
7079  #else
7080  #define __NR_sys_setresgid __NR_setresgid
7081  #endif
7082  
7083  _syscall1(int, sys_setuid, uid_t, uid)
7084  _syscall1(int, sys_setgid, gid_t, gid)
7085  _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7086  _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7087  
7088  void syscall_init(void)
7089  {
7090      IOCTLEntry *ie;
7091      const argtype *arg_type;
7092      int size;
7093      int i;
7094  
7095      thunk_init(STRUCT_MAX);
7096  
7097  #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7098  #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7099  #include "syscall_types.h"
7100  #undef STRUCT
7101  #undef STRUCT_SPECIAL
7102  
7103      /* Build the target_to_host_errno_table[] from
7104       * host_to_target_errno_table[]. */
7105      for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7106          target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7107      }
7108  
7109      /* We patch the ioctl size if necessary. We rely on the fact that
7110         no ioctl has all bits set to '1' in the size field */
7111      ie = ioctl_entries;
7112      while (ie->target_cmd != 0) {
7113          if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7114              TARGET_IOC_SIZEMASK) {
7115              arg_type = ie->arg_type;
7116              if (arg_type[0] != TYPE_PTR) {
7117                  fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7118                          ie->target_cmd);
7119                  exit(1);
7120              }
7121              arg_type++;
7122              size = thunk_type_size(arg_type, 0);
7123              ie->target_cmd = (ie->target_cmd &
7124                                ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7125                  (size << TARGET_IOC_SIZESHIFT);
7126          }
7127  
7128          /* automatic consistency check if same arch */
7129  #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7130      (defined(__x86_64__) && defined(TARGET_X86_64))
7131          if (unlikely(ie->target_cmd != ie->host_cmd)) {
7132              fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7133                      ie->name, ie->target_cmd, ie->host_cmd);
7134          }
7135  #endif
7136          ie++;
7137      }
7138  }
7139  
7140  #ifdef TARGET_NR_truncate64
7141  static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7142                                           abi_long arg2,
7143                                           abi_long arg3,
7144                                           abi_long arg4)
7145  {
7146      if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7147          arg2 = arg3;
7148          arg3 = arg4;
7149      }
7150      return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7151  }
7152  #endif
7153  
7154  #ifdef TARGET_NR_ftruncate64
7155  static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7156                                            abi_long arg2,
7157                                            abi_long arg3,
7158                                            abi_long arg4)
7159  {
7160      if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7161          arg2 = arg3;
7162          arg3 = arg4;
7163      }
7164      return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7165  }
7166  #endif
7167  
7168  #if defined(TARGET_NR_timer_settime) || \
7169      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7170  static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7171                                                   abi_ulong target_addr)
7172  {
7173      if (target_to_host_timespec(&host_its->it_interval, target_addr +
7174                                  offsetof(struct target_itimerspec,
7175                                           it_interval)) ||
7176          target_to_host_timespec(&host_its->it_value, target_addr +
7177                                  offsetof(struct target_itimerspec,
7178                                           it_value))) {
7179          return -TARGET_EFAULT;
7180      }
7181  
7182      return 0;
7183  }
7184  #endif
7185  
7186  #if defined(TARGET_NR_timer_settime64) || \
7187      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7188  static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7189                                                     abi_ulong target_addr)
7190  {
7191      if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7192                                    offsetof(struct target__kernel_itimerspec,
7193                                             it_interval)) ||
7194          target_to_host_timespec64(&host_its->it_value, target_addr +
7195                                    offsetof(struct target__kernel_itimerspec,
7196                                             it_value))) {
7197          return -TARGET_EFAULT;
7198      }
7199  
7200      return 0;
7201  }
7202  #endif
7203  
7204  #if ((defined(TARGET_NR_timerfd_gettime) || \
7205        defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7206        defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7207  static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7208                                                   struct itimerspec *host_its)
7209  {
7210      if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7211                                                         it_interval),
7212                                  &host_its->it_interval) ||
7213          host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7214                                                         it_value),
7215                                  &host_its->it_value)) {
7216          return -TARGET_EFAULT;
7217      }
7218      return 0;
7219  }
7220  #endif
7221  
7222  #if ((defined(TARGET_NR_timerfd_gettime64) || \
7223        defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7224        defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7225  static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7226                                                     struct itimerspec *host_its)
7227  {
7228      if (host_to_target_timespec64(target_addr +
7229                                    offsetof(struct target__kernel_itimerspec,
7230                                             it_interval),
7231                                    &host_its->it_interval) ||
7232          host_to_target_timespec64(target_addr +
7233                                    offsetof(struct target__kernel_itimerspec,
7234                                             it_value),
7235                                    &host_its->it_value)) {
7236          return -TARGET_EFAULT;
7237      }
7238      return 0;
7239  }
7240  #endif
7241  
7242  #if defined(TARGET_NR_adjtimex) || \
7243      (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7244  static inline abi_long target_to_host_timex(struct timex *host_tx,
7245                                              abi_long target_addr)
7246  {
7247      struct target_timex *target_tx;
7248  
7249      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7250          return -TARGET_EFAULT;
7251      }
7252  
7253      __get_user(host_tx->modes, &target_tx->modes);
7254      __get_user(host_tx->offset, &target_tx->offset);
7255      __get_user(host_tx->freq, &target_tx->freq);
7256      __get_user(host_tx->maxerror, &target_tx->maxerror);
7257      __get_user(host_tx->esterror, &target_tx->esterror);
7258      __get_user(host_tx->status, &target_tx->status);
7259      __get_user(host_tx->constant, &target_tx->constant);
7260      __get_user(host_tx->precision, &target_tx->precision);
7261      __get_user(host_tx->tolerance, &target_tx->tolerance);
7262      __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7263      __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7264      __get_user(host_tx->tick, &target_tx->tick);
7265      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7266      __get_user(host_tx->jitter, &target_tx->jitter);
7267      __get_user(host_tx->shift, &target_tx->shift);
7268      __get_user(host_tx->stabil, &target_tx->stabil);
7269      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7270      __get_user(host_tx->calcnt, &target_tx->calcnt);
7271      __get_user(host_tx->errcnt, &target_tx->errcnt);
7272      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7273      __get_user(host_tx->tai, &target_tx->tai);
7274  
7275      unlock_user_struct(target_tx, target_addr, 0);
7276      return 0;
7277  }
7278  
7279  static inline abi_long host_to_target_timex(abi_long target_addr,
7280                                              struct timex *host_tx)
7281  {
7282      struct target_timex *target_tx;
7283  
7284      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7285          return -TARGET_EFAULT;
7286      }
7287  
7288      __put_user(host_tx->modes, &target_tx->modes);
7289      __put_user(host_tx->offset, &target_tx->offset);
7290      __put_user(host_tx->freq, &target_tx->freq);
7291      __put_user(host_tx->maxerror, &target_tx->maxerror);
7292      __put_user(host_tx->esterror, &target_tx->esterror);
7293      __put_user(host_tx->status, &target_tx->status);
7294      __put_user(host_tx->constant, &target_tx->constant);
7295      __put_user(host_tx->precision, &target_tx->precision);
7296      __put_user(host_tx->tolerance, &target_tx->tolerance);
7297      __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7298      __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7299      __put_user(host_tx->tick, &target_tx->tick);
7300      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7301      __put_user(host_tx->jitter, &target_tx->jitter);
7302      __put_user(host_tx->shift, &target_tx->shift);
7303      __put_user(host_tx->stabil, &target_tx->stabil);
7304      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7305      __put_user(host_tx->calcnt, &target_tx->calcnt);
7306      __put_user(host_tx->errcnt, &target_tx->errcnt);
7307      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7308      __put_user(host_tx->tai, &target_tx->tai);
7309  
7310      unlock_user_struct(target_tx, target_addr, 1);
7311      return 0;
7312  }
7313  #endif
7314  
7315  
7316  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7317  static inline abi_long target_to_host_timex64(struct timex *host_tx,
7318                                                abi_long target_addr)
7319  {
7320      struct target__kernel_timex *target_tx;
7321  
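          /*
           * The time member is converted separately via copy_from_user_timeval64()
           * because struct __kernel_timex carries 64-bit time values; the
           * remaining scalar fields are copied directly below.
           */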
7322      if (copy_from_user_timeval64(&host_tx->time, target_addr +
7323                                   offsetof(struct target__kernel_timex,
7324                                            time))) {
7325          return -TARGET_EFAULT;
7326      }
7327  
7328      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7329          return -TARGET_EFAULT;
7330      }
7331  
7332      __get_user(host_tx->modes, &target_tx->modes);
7333      __get_user(host_tx->offset, &target_tx->offset);
7334      __get_user(host_tx->freq, &target_tx->freq);
7335      __get_user(host_tx->maxerror, &target_tx->maxerror);
7336      __get_user(host_tx->esterror, &target_tx->esterror);
7337      __get_user(host_tx->status, &target_tx->status);
7338      __get_user(host_tx->constant, &target_tx->constant);
7339      __get_user(host_tx->precision, &target_tx->precision);
7340      __get_user(host_tx->tolerance, &target_tx->tolerance);
7341      __get_user(host_tx->tick, &target_tx->tick);
7342      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7343      __get_user(host_tx->jitter, &target_tx->jitter);
7344      __get_user(host_tx->shift, &target_tx->shift);
7345      __get_user(host_tx->stabil, &target_tx->stabil);
7346      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7347      __get_user(host_tx->calcnt, &target_tx->calcnt);
7348      __get_user(host_tx->errcnt, &target_tx->errcnt);
7349      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7350      __get_user(host_tx->tai, &target_tx->tai);
7351  
7352      unlock_user_struct(target_tx, target_addr, 0);
7353      return 0;
7354  }
7355  
7356  static inline abi_long host_to_target_timex64(abi_long target_addr,
7357                                                struct timex *host_tx)
7358  {
7359      struct target__kernel_timex *target_tx;
7360  
7361     if (copy_to_user_timeval64(target_addr +
7362                                offsetof(struct target__kernel_timex, time),
7363                                &host_tx->time)) {
7364          return -TARGET_EFAULT;
7365      }
7366  
7367      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7368          return -TARGET_EFAULT;
7369      }
7370  
7371      __put_user(host_tx->modes, &target_tx->modes);
7372      __put_user(host_tx->offset, &target_tx->offset);
7373      __put_user(host_tx->freq, &target_tx->freq);
7374      __put_user(host_tx->maxerror, &target_tx->maxerror);
7375      __put_user(host_tx->esterror, &target_tx->esterror);
7376      __put_user(host_tx->status, &target_tx->status);
7377      __put_user(host_tx->constant, &target_tx->constant);
7378      __put_user(host_tx->precision, &target_tx->precision);
7379      __put_user(host_tx->tolerance, &target_tx->tolerance);
7380      __put_user(host_tx->tick, &target_tx->tick);
7381      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7382      __put_user(host_tx->jitter, &target_tx->jitter);
7383      __put_user(host_tx->shift, &target_tx->shift);
7384      __put_user(host_tx->stabil, &target_tx->stabil);
7385      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7386      __put_user(host_tx->calcnt, &target_tx->calcnt);
7387      __put_user(host_tx->errcnt, &target_tx->errcnt);
7388      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7389      __put_user(host_tx->tai, &target_tx->tai);
7390  
7391      unlock_user_struct(target_tx, target_addr, 1);
7392      return 0;
7393  }
7394  #endif
7395  
7396  static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7397                                                 abi_ulong target_addr)
7398  {
7399      struct target_sigevent *target_sevp;
7400  
7401      if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7402          return -TARGET_EFAULT;
7403      }
7404  
7405      /* This union is awkward on 64 bit systems because it has a 32 bit
7406       * integer and a pointer in it; we follow the conversion approach
7407       * used for handling sigval types in signal.c so the guest should get
7408       * the correct value back even if we did a 64 bit byteswap and it's
7409       * using the 32 bit integer.
7410       */
7411      host_sevp->sigev_value.sival_ptr =
7412          (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7413      host_sevp->sigev_signo =
7414          target_to_host_signal(tswap32(target_sevp->sigev_signo));
7415      host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7416      host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7417  
7418      unlock_user_struct(target_sevp, target_addr, 1);
7419      return 0;
7420  }
7421  
7422  #if defined(TARGET_NR_mlockall)
7423  static inline int target_to_host_mlockall_arg(int arg)
7424  {
7425      int result = 0;
7426  
7427      if (arg & TARGET_MCL_CURRENT) {
7428          result |= MCL_CURRENT;
7429      }
7430      if (arg & TARGET_MCL_FUTURE) {
7431          result |= MCL_FUTURE;
7432      }
7433  #ifdef MCL_ONFAULT
7434      if (arg & TARGET_MCL_ONFAULT) {
7435          result |= MCL_ONFAULT;
7436      }
7437  #endif
7438  
7439      return result;
7440  }
7441  #endif
7442  
7443  #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7444       defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7445       defined(TARGET_NR_newfstatat))
7446  static inline abi_long host_to_target_stat64(void *cpu_env,
7447                                               abi_ulong target_addr,
7448                                               struct stat *host_st)
7449  {
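          /*
           * ARM EABI uses its own stat64 layout (alignment and padding differ
           * from the generic target_stat64), so convert through
           * struct target_eabi_stat64 in that case.
           */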
7450  #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7451      if (((CPUARMState *)cpu_env)->eabi) {
7452          struct target_eabi_stat64 *target_st;
7453  
7454          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7455              return -TARGET_EFAULT;
7456          memset(target_st, 0, sizeof(struct target_eabi_stat64));
7457          __put_user(host_st->st_dev, &target_st->st_dev);
7458          __put_user(host_st->st_ino, &target_st->st_ino);
7459  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7460          __put_user(host_st->st_ino, &target_st->__st_ino);
7461  #endif
7462          __put_user(host_st->st_mode, &target_st->st_mode);
7463          __put_user(host_st->st_nlink, &target_st->st_nlink);
7464          __put_user(host_st->st_uid, &target_st->st_uid);
7465          __put_user(host_st->st_gid, &target_st->st_gid);
7466          __put_user(host_st->st_rdev, &target_st->st_rdev);
7467          __put_user(host_st->st_size, &target_st->st_size);
7468          __put_user(host_st->st_blksize, &target_st->st_blksize);
7469          __put_user(host_st->st_blocks, &target_st->st_blocks);
7470          __put_user(host_st->st_atime, &target_st->target_st_atime);
7471          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7472          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7473  #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7474          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7475          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7476          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7477  #endif
7478          unlock_user_struct(target_st, target_addr, 1);
7479      } else
7480  #endif
7481      {
7482  #if defined(TARGET_HAS_STRUCT_STAT64)
7483          struct target_stat64 *target_st;
7484  #else
7485          struct target_stat *target_st;
7486  #endif
7487  
7488          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7489              return -TARGET_EFAULT;
7490          memset(target_st, 0, sizeof(*target_st));
7491          __put_user(host_st->st_dev, &target_st->st_dev);
7492          __put_user(host_st->st_ino, &target_st->st_ino);
7493  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7494          __put_user(host_st->st_ino, &target_st->__st_ino);
7495  #endif
7496          __put_user(host_st->st_mode, &target_st->st_mode);
7497          __put_user(host_st->st_nlink, &target_st->st_nlink);
7498          __put_user(host_st->st_uid, &target_st->st_uid);
7499          __put_user(host_st->st_gid, &target_st->st_gid);
7500          __put_user(host_st->st_rdev, &target_st->st_rdev);
7501          /* XXX: better use of kernel struct */
7502          __put_user(host_st->st_size, &target_st->st_size);
7503          __put_user(host_st->st_blksize, &target_st->st_blksize);
7504          __put_user(host_st->st_blocks, &target_st->st_blocks);
7505          __put_user(host_st->st_atime, &target_st->target_st_atime);
7506          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7507          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7508  #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7509          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7510          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7511          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7512  #endif
7513          unlock_user_struct(target_st, target_addr, 1);
7514      }
7515  
7516      return 0;
7517  }
7518  #endif
7519  
7520  #if defined(TARGET_NR_statx) && defined(__NR_statx)
7521  static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7522                                              abi_ulong target_addr)
7523  {
7524      struct target_statx *target_stx;
7525  
7526      if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7527          return -TARGET_EFAULT;
7528      }
7529      memset(target_stx, 0, sizeof(*target_stx));
7530  
7531      __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7532      __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7533      __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7534      __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7535      __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7536      __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7537      __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7538      __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7539      __put_user(host_stx->stx_size, &target_stx->stx_size);
7540      __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7541      __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7542      __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7543      __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7544      __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7545      __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7546      __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7547      __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7548      __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7549      __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7550      __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7551      __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7552      __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7553      __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7554  
7555      unlock_user_struct(target_stx, target_addr, 1);
7556  
7557      return 0;
7558  }
7559  #endif
7560  
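      /*
       * do_sys_futex() and do_safe_futex() pick the host futex syscall that
       * matches the timespec layout being passed: 64-bit hosts always go
       * through __NR_futex (their time_t is already 64-bit), while 32-bit
       * hosts use __NR_futex_time64 when the host timespec has a 64-bit
       * tv_sec and fall back to the legacy __NR_futex otherwise.
       */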
7561  static int do_sys_futex(int *uaddr, int op, int val,
7562                           const struct timespec *timeout, int *uaddr2,
7563                           int val3)
7564  {
7565  #if HOST_LONG_BITS == 64
7566  #if defined(__NR_futex)
7567      /* 64-bit hosts always have a 64-bit time_t; there is no _time64 variant. */
7568      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7569  
7570  #endif
7571  #else /* HOST_LONG_BITS == 64 */
7572  #if defined(__NR_futex_time64)
7573      if (sizeof(timeout->tv_sec) == 8) {
7574          /* _time64 function on 32bit arch */
7575          return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7576      }
7577  #endif
7578  #if defined(__NR_futex)
7579      /* old function on 32bit arch */
7580      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7581  #endif
7582  #endif /* HOST_LONG_BITS == 64 */
7583      g_assert_not_reached();
7584  }
7585  
7586  static int do_safe_futex(int *uaddr, int op, int val,
7587                           const struct timespec *timeout, int *uaddr2,
7588                           int val3)
7589  {
7590  #if HOST_LONG_BITS == 64
7591  #if defined(__NR_futex)
7592      /* 64-bit hosts always have a 64-bit time_t; there is no _time64 variant. */
7593      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7594  #endif
7595  #else /* HOST_LONG_BITS == 64 */
7596  #if defined(__NR_futex_time64)
7597      if (sizeof(timeout->tv_sec) == 8) {
7598          /* _time64 function on 32bit arch */
7599          return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7600                                             val3));
7601      }
7602  #endif
7603  #if defined(__NR_futex)
7604      /* old function on 32bit arch */
7605      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7606  #endif
7607  #endif /* HOST_LONG_BITS == 64 */
7608      return -TARGET_ENOSYS;
7609  }
7610  
7611  /* ??? Using host futex calls even when target atomic operations
7612     are not really atomic probably breaks things.  However, implementing
7613     futexes locally would make futexes shared between multiple processes
7614     tricky.  In any case they're probably useless because guest atomic
7615     operations won't work either.  */
7616  #if defined(TARGET_NR_futex)
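      /*
       * do_futex() and do_futex_time64() below differ only in how the guest
       * timespec is converted (32-bit vs 64-bit layout); the futex operation
       * itself is forwarded to do_safe_futex() in both cases.
       */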
7617  static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7618                      target_ulong timeout, target_ulong uaddr2, int val3)
7619  {
7620      struct timespec ts, *pts;
7621      int base_op;
7622  
7623      /* ??? We assume FUTEX_* constants are the same on both host
7624         and target.  */
7625  #ifdef FUTEX_CMD_MASK
7626      base_op = op & FUTEX_CMD_MASK;
7627  #else
7628      base_op = op;
7629  #endif
7630      switch (base_op) {
7631      case FUTEX_WAIT:
7632      case FUTEX_WAIT_BITSET:
7633          if (timeout) {
7634              pts = &ts;
7635              if (target_to_host_timespec(pts, timeout)) {
                      return -TARGET_EFAULT;
                  }
7636          } else {
7637              pts = NULL;
7638          }
7639          return do_safe_futex(g2h(cpu, uaddr),
7640                               op, tswap32(val), pts, NULL, val3);
7641      case FUTEX_WAKE:
7642          return do_safe_futex(g2h(cpu, uaddr),
7643                               op, val, NULL, NULL, 0);
7644      case FUTEX_FD:
7645          return do_safe_futex(g2h(cpu, uaddr),
7646                               op, val, NULL, NULL, 0);
7647      case FUTEX_REQUEUE:
7648      case FUTEX_CMP_REQUEUE:
7649      case FUTEX_WAKE_OP:
7650          /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7651             TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7652             But the prototype takes a `struct timespec *'; insert casts
7653             to satisfy the compiler.  We do not need to tswap TIMEOUT
7654             since it's not compared to guest memory.  */
7655          pts = (struct timespec *)(uintptr_t) timeout;
7656          return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7657                               (base_op == FUTEX_CMP_REQUEUE
7658                                ? tswap32(val3) : val3));
7659      default:
7660          return -TARGET_ENOSYS;
7661      }
7662  }
7663  #endif
7664  
7665  #if defined(TARGET_NR_futex_time64)
7666  static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7667                             int val, target_ulong timeout,
7668                             target_ulong uaddr2, int val3)
7669  {
7670      struct timespec ts, *pts;
7671      int base_op;
7672  
7673      /* ??? We assume FUTEX_* constants are the same on both host
7674         and target.  */
7675  #ifdef FUTEX_CMD_MASK
7676      base_op = op & FUTEX_CMD_MASK;
7677  #else
7678      base_op = op;
7679  #endif
7680      switch (base_op) {
7681      case FUTEX_WAIT:
7682      case FUTEX_WAIT_BITSET:
7683          if (timeout) {
7684              pts = &ts;
7685              if (target_to_host_timespec64(pts, timeout)) {
7686                  return -TARGET_EFAULT;
7687              }
7688          } else {
7689              pts = NULL;
7690          }
7691          return do_safe_futex(g2h(cpu, uaddr), op,
7692                               tswap32(val), pts, NULL, val3);
7693      case FUTEX_WAKE:
7694          return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7695      case FUTEX_FD:
7696          return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7697      case FUTEX_REQUEUE:
7698      case FUTEX_CMP_REQUEUE:
7699      case FUTEX_WAKE_OP:
7700          /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7701             TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7702             But the prototype takes a `struct timespec *'; insert casts
7703             to satisfy the compiler.  We do not need to tswap TIMEOUT
7704             since it's not compared to guest memory.  */
7705          pts = (struct timespec *)(uintptr_t) timeout;
7706          return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7707                               (base_op == FUTEX_CMP_REQUEUE
7708                                ? tswap32(val3) : val3));
7709      default:
7710          return -TARGET_ENOSYS;
7711      }
7712  }
7713  #endif
7714  
7715  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
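      /*
       * name_to_handle_at() fills a variable-length struct file_handle: we
       * read the guest's handle_bytes to size the buffer, let the host fill
       * it, then copy it back byte-swapping only handle_bytes and
       * handle_type, since the payload itself is opaque to userspace.
       */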
7716  static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7717                                       abi_long handle, abi_long mount_id,
7718                                       abi_long flags)
7719  {
7720      struct file_handle *target_fh;
7721      struct file_handle *fh;
7722      int mid = 0;
7723      abi_long ret;
7724      char *name;
7725      unsigned int size, total_size;
7726  
7727      if (get_user_s32(size, handle)) {
7728          return -TARGET_EFAULT;
7729      }
7730  
7731      name = lock_user_string(pathname);
7732      if (!name) {
7733          return -TARGET_EFAULT;
7734      }
7735  
7736      total_size = sizeof(struct file_handle) + size;
7737      target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7738      if (!target_fh) {
7739          unlock_user(name, pathname, 0);
7740          return -TARGET_EFAULT;
7741      }
7742  
7743      fh = g_malloc0(total_size);
7744      fh->handle_bytes = size;
7745  
7746      ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7747      unlock_user(name, pathname, 0);
7748  
7749      /* man name_to_handle_at(2):
7750       * Other than the use of the handle_bytes field, the caller should treat
7751       * the file_handle structure as an opaque data type
7752       */
7753  
7754      memcpy(target_fh, fh, total_size);
7755      target_fh->handle_bytes = tswap32(fh->handle_bytes);
7756      target_fh->handle_type = tswap32(fh->handle_type);
7757      g_free(fh);
7758      unlock_user(target_fh, handle, total_size);
7759  
7760      if (put_user_s32(mid, mount_id)) {
7761          return -TARGET_EFAULT;
7762      }
7763  
7764      return ret;
7765  
7766  }
7767  #endif
7768  
7769  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7770  static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7771                                       abi_long flags)
7772  {
7773      struct file_handle *target_fh;
7774      struct file_handle *fh;
7775      unsigned int size, total_size;
7776      abi_long ret;
7777  
7778      if (get_user_s32(size, handle)) {
7779          return -TARGET_EFAULT;
7780      }
7781  
7782      total_size = sizeof(struct file_handle) + size;
7783      target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7784      if (!target_fh) {
7785          return -TARGET_EFAULT;
7786      }
7787  
7788      fh = g_memdup(target_fh, total_size);
7789      fh->handle_bytes = size;
7790      fh->handle_type = tswap32(target_fh->handle_type);
7791  
7792      ret = get_errno(open_by_handle_at(mount_fd, fh,
7793                      target_to_host_bitmask(flags, fcntl_flags_tbl)));
7794  
7795      g_free(fh);
7796  
7797      unlock_user(target_fh, handle, total_size);
7798  
7799      return ret;
7800  }
7801  #endif
7802  
7803  #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7804  
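      /*
       * Helper shared by the signalfd and signalfd4 syscalls: converts the
       * guest signal mask and flags to host representation, creates the
       * signalfd, and registers an fd translator (target_signalfd_trans) so
       * that reads from the descriptor return records in the guest's layout.
       */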
7805  static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7806  {
7807      int host_flags;
7808      target_sigset_t *target_mask;
7809      sigset_t host_mask;
7810      abi_long ret;
7811  
7812      if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7813          return -TARGET_EINVAL;
7814      }
7815      if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7816          return -TARGET_EFAULT;
7817      }
7818  
7819      target_to_host_sigset(&host_mask, target_mask);
7820  
7821      host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7822  
7823      ret = get_errno(signalfd(fd, &host_mask, host_flags));
7824      if (ret >= 0) {
7825          fd_trans_register(ret, &target_signalfd_trans);
7826      }
7827  
7828      unlock_user_struct(target_mask, mask, 0);
7829  
7830      return ret;
7831  }
7832  #endif
7833  
7834  /* Map host to target signal numbers for the wait family of syscalls.
7835     Assume all other status bits are the same.  */
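      /* E.g. for a child killed by a signal, the WTERMSIG value in the low
         7 bits is remapped to the target's number for that signal, while the
         core-dump flag and any higher bits pass through unchanged. */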
7836  int host_to_target_waitstatus(int status)
7837  {
7838      if (WIFSIGNALED(status)) {
7839          return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7840      }
7841      if (WIFSTOPPED(status)) {
7842          return (host_to_target_signal(WSTOPSIG(status)) << 8)
7843                 | (status & 0xff);
7844      }
7845      return status;
7846  }
7847  
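      /* /proc/self/cmdline: emit the saved argv[] strings back to back,
         each with its terminating NUL, as the kernel does. */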
7848  static int open_self_cmdline(void *cpu_env, int fd)
7849  {
7850      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7851      struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7852      int i;
7853  
7854      for (i = 0; i < bprm->argc; i++) {
7855          size_t len = strlen(bprm->argv[i]) + 1;
7856  
7857          if (write(fd, bprm->argv[i], len) != len) {
7858              return -1;
7859          }
7860      }
7861  
7862      return 0;
7863  }
7864  
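      /*
       * Synthesize /proc/self/maps: walk the host mappings, keep only the
       * ranges that correspond to valid guest addresses, and print them
       * with guest (h2g) addresses and the permission flags QEMU tracks
       * per page.
       */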
7865  static int open_self_maps(void *cpu_env, int fd)
7866  {
7867      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7868      TaskState *ts = cpu->opaque;
7869      GSList *map_info = read_self_maps();
7870      GSList *s;
7871      int count;
7872  
7873      for (s = map_info; s; s = g_slist_next(s)) {
7874          MapInfo *e = (MapInfo *) s->data;
7875  
7876          if (h2g_valid(e->start)) {
7877              unsigned long min = e->start;
7878              unsigned long max = e->end;
7879              int flags = page_get_flags(h2g(min));
7880              const char *path;
7881  
7882              max = h2g_valid(max - 1) ?
7883                  max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7884  
7885              if (page_check_range(h2g(min), max - min, flags) == -1) {
7886                  continue;
7887              }
7888  
7889              if (h2g(min) == ts->info->stack_limit) {
7890                  path = "[stack]";
7891              } else {
7892                  path = e->path;
7893              }
7894  
7895              count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7896                              " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7897                              h2g(min), h2g(max - 1) + 1,
7898                              (flags & PAGE_READ) ? 'r' : '-',
7899                              (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7900                              (flags & PAGE_EXEC) ? 'x' : '-',
7901                              e->is_priv ? 'p' : '-',
7902                              (uint64_t) e->offset, e->dev, e->inode);
7903              if (path) {
7904                  dprintf(fd, "%*s%s\n", 73 - count, "", path);
7905              } else {
7906                  dprintf(fd, "\n");
7907              }
7908          }
7909      }
7910  
7911      free_self_maps(map_info);
7912  
7913  #ifdef TARGET_VSYSCALL_PAGE
7914      /*
7915       * We only support execution from the vsyscall page.
7916       * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7917       */
7918      count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7919                      " --xp 00000000 00:00 0",
7920                      TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7921      dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7922  #endif
7923  
7924      return 0;
7925  }
7926  
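      /*
       * Minimal /proc/self/stat: only the pid, comm and start-of-stack
       * fields carry real values; every other field reads back as 0.
       */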
7927  static int open_self_stat(void *cpu_env, int fd)
7928  {
7929      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7930      TaskState *ts = cpu->opaque;
7931      g_autoptr(GString) buf = g_string_new(NULL);
7932      int i;
7933  
7934      for (i = 0; i < 44; i++) {
7935          if (i == 0) {
7936              /* pid */
7937              g_string_printf(buf, FMT_pid " ", getpid());
7938          } else if (i == 1) {
7939              /* app name */
7940              gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7941              bin = bin ? bin + 1 : ts->bprm->argv[0];
7942              g_string_printf(buf, "(%.15s) ", bin);
7943          } else if (i == 27) {
7944              /* stack bottom */
7945              g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7946          } else {
7947              /* for the rest, there is MasterCard */
7948              g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7949          }
7950  
7951          if (write(fd, buf->str, buf->len) != buf->len) {
7952              return -1;
7953          }
7954      }
7955  
7956      return 0;
7957  }
7958  
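      /* /proc/self/auxv: the auxiliary vector saved on the guest stack at
         exec time, copied out verbatim. */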
7959  static int open_self_auxv(void *cpu_env, int fd)
7960  {
7961      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7962      TaskState *ts = cpu->opaque;
7963      abi_ulong auxv = ts->info->saved_auxv;
7964      abi_ulong len = ts->info->auxv_len;
7965      char *ptr;
7966  
7967      /*
7968       * Auxiliary vector is stored in target process stack.
7969       * read in whole auxv vector and copy it to file
7970       */
7971      ptr = lock_user(VERIFY_READ, auxv, len, 0);
7972      if (ptr != NULL) {
7973          while (len > 0) {
7974              ssize_t r;
7975              r = write(fd, ptr, len);
7976              if (r <= 0) {
7977                  break;
7978              }
7979              len -= r;
7980              ptr += r;
7981          }
7982          lseek(fd, 0, SEEK_SET);
7983          unlock_user(ptr, auxv, len);
7984      }
7985  
7986      return 0;
7987  }
7988  
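      /*
       * Returns nonzero when filename names one of this process's own proc
       * entries, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
       */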
7989  static int is_proc_myself(const char *filename, const char *entry)
7990  {
7991      if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7992          filename += strlen("/proc/");
7993          if (!strncmp(filename, "self/", strlen("self/"))) {
7994              filename += strlen("self/");
7995          } else if (*filename >= '1' && *filename <= '9') {
7996              char myself[80];
7997              snprintf(myself, sizeof(myself), "%d/", getpid());
7998              if (!strncmp(filename, myself, strlen(myself))) {
7999                  filename += strlen(myself);
8000              } else {
8001                  return 0;
8002              }
8003          } else {
8004              return 0;
8005          }
8006          if (!strcmp(filename, entry)) {
8007              return 1;
8008          }
8009      }
8010      return 0;
8011  }
8012  
8013  #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8014      defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8015  static int is_proc(const char *filename, const char *entry)
8016  {
8017      return strcmp(filename, entry) == 0;
8018  }
8019  #endif
8020  
8021  #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
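      /*
       * /proc/net/route prints addresses as hex words in host byte order;
       * when host and guest endianness differ, re-emit each route with the
       * destination, gateway and netmask fields byte-swapped for the guest.
       */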
8022  static int open_net_route(void *cpu_env, int fd)
8023  {
8024      FILE *fp;
8025      char *line = NULL;
8026      size_t len = 0;
8027      ssize_t read;
8028  
8029      fp = fopen("/proc/net/route", "r");
8030      if (fp == NULL) {
8031          return -1;
8032      }
8033  
8034      /* read header */
8035  
8036      read = getline(&line, &len, fp);
          if (read != -1) {
8037          dprintf(fd, "%s", line);
          }
8038  
8039      /* read routes */
8040  
8041      while ((read = getline(&line, &len, fp)) != -1) {
8042          char iface[16];
8043          uint32_t dest, gw, mask;
8044          unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8045          int fields;
8046  
8047          fields = sscanf(line,
8048                          "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8049                          iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8050                          &mask, &mtu, &window, &irtt);
8051          if (fields != 11) {
8052              continue;
8053          }
8054          dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8055                  iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8056                  metric, tswap32(mask), mtu, window, irtt);
8057      }
8058  
8059      free(line);
8060      fclose(fp);
8061  
8062      return 0;
8063  }
8064  #endif
8065  
8066  #if defined(TARGET_SPARC)
8067  static int open_cpuinfo(void *cpu_env, int fd)
8068  {
8069      dprintf(fd, "type\t\t: sun4u\n");
8070      return 0;
8071  }
8072  #endif
8073  
8074  #if defined(TARGET_HPPA)
8075  static int open_cpuinfo(void *cpu_env, int fd)
8076  {
8077      dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8078      dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8079      dprintf(fd, "capabilities\t: os32\n");
8080      dprintf(fd, "model\t\t: 9000/778/B160L\n");
8081      dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8082      return 0;
8083  }
8084  #endif
8085  
8086  #if defined(TARGET_M68K)
8087  static int open_hardware(void *cpu_env, int fd)
8088  {
8089      dprintf(fd, "Model:\t\tqemu-m68k\n");
8090      return 0;
8091  }
8092  #endif
8093  
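      /*
       * openat() wrapper that intercepts the /proc entries QEMU must
       * synthesize for the guest (maps, stat, auxv, cmdline and a few
       * target-specific files).  Fake content is written to an unlinked
       * temporary file whose descriptor is handed back to the guest, and
       * /proc/self/exe is redirected to the real guest executable; all
       * other paths are forwarded to the host via safe_openat().
       */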
8094  static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8095  {
8096      struct fake_open {
8097          const char *filename;
8098          int (*fill)(void *cpu_env, int fd);
8099          int (*cmp)(const char *s1, const char *s2);
8100      };
8101      const struct fake_open *fake_open;
8102      static const struct fake_open fakes[] = {
8103          { "maps", open_self_maps, is_proc_myself },
8104          { "stat", open_self_stat, is_proc_myself },
8105          { "auxv", open_self_auxv, is_proc_myself },
8106          { "cmdline", open_self_cmdline, is_proc_myself },
8107  #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8108          { "/proc/net/route", open_net_route, is_proc },
8109  #endif
8110  #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8111          { "/proc/cpuinfo", open_cpuinfo, is_proc },
8112  #endif
8113  #if defined(TARGET_M68K)
8114          { "/proc/hardware", open_hardware, is_proc },
8115  #endif
8116          { NULL, NULL, NULL }
8117      };
8118  
8119      if (is_proc_myself(pathname, "exe")) {
8120          int execfd = qemu_getauxval(AT_EXECFD);
8121          return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8122      }
8123  
8124      for (fake_open = fakes; fake_open->filename; fake_open++) {
8125          if (fake_open->cmp(pathname, fake_open->filename)) {
8126              break;
8127          }
8128      }
8129  
8130      if (fake_open->filename) {
8131          const char *tmpdir;
8132          char filename[PATH_MAX];
8133          int fd, r;
8134  
8135          /* create temporary file to map stat to */
8136          tmpdir = getenv("TMPDIR");
8137          if (!tmpdir)
8138              tmpdir = "/tmp";
8139          snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8140          fd = mkstemp(filename);
8141          if (fd < 0) {
8142              return fd;
8143          }
8144          unlink(filename);
8145  
8146          if ((r = fake_open->fill(cpu_env, fd))) {
8147              int e = errno;
8148              close(fd);
8149              errno = e;
8150              return r;
8151          }
8152          lseek(fd, 0, SEEK_SET);
8153  
8154          return fd;
8155      }
8156  
8157      return safe_openat(dirfd, path(pathname), flags, mode);
8158  }
8159  
8160  #define TIMER_MAGIC 0x0caf0000
8161  #define TIMER_MAGIC_MASK 0xffff0000
8162  
8163  /* Convert QEMU provided timer ID back to internal 16bit index format */
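      /* E.g. a guest-visible ID of TIMER_MAGIC | 3 (0x0caf0003) selects host
         timer slot 3; anything without the magic upper half is rejected with
         -TARGET_EINVAL. */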
8164  static target_timer_t get_timer_id(abi_long arg)
8165  {
8166      target_timer_t timerid = arg;
8167  
8168      if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8169          return -TARGET_EINVAL;
8170      }
8171  
8172      timerid &= 0xffff;
8173  
8174      if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8175          return -TARGET_EINVAL;
8176      }
8177  
8178      return timerid;
8179  }
8180  
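      /*
       * sched_getaffinity/sched_setaffinity masks are arrays of longs whose
       * word size can differ between guest and host (abi_ulong vs unsigned
       * long), so the two helpers below copy them bit by bit.  E.g. overall
       * bit 35 sits in word 1, bit 3 of a 32-bit guest mask, but in word 0,
       * bit 35 of a 64-bit host mask.
       */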
8181  static int target_to_host_cpu_mask(unsigned long *host_mask,
8182                                     size_t host_size,
8183                                     abi_ulong target_addr,
8184                                     size_t target_size)
8185  {
8186      unsigned target_bits = sizeof(abi_ulong) * 8;
8187      unsigned host_bits = sizeof(*host_mask) * 8;
8188      abi_ulong *target_mask;
8189      unsigned i, j;
8190  
8191      assert(host_size >= target_size);
8192  
8193      target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8194      if (!target_mask) {
8195          return -TARGET_EFAULT;
8196      }
8197      memset(host_mask, 0, host_size);
8198  
8199      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8200          unsigned bit = i * target_bits;
8201          abi_ulong val;
8202  
8203          __get_user(val, &target_mask[i]);
8204          for (j = 0; j < target_bits; j++, bit++) {
8205              if (val & (1UL << j)) {
8206                  host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8207              }
8208          }
8209      }
8210  
8211      unlock_user(target_mask, target_addr, 0);
8212      return 0;
8213  }
8214  
8215  static int host_to_target_cpu_mask(const unsigned long *host_mask,
8216                                     size_t host_size,
8217                                     abi_ulong target_addr,
8218                                     size_t target_size)
8219  {
8220      unsigned target_bits = sizeof(abi_ulong) * 8;
8221      unsigned host_bits = sizeof(*host_mask) * 8;
8222      abi_ulong *target_mask;
8223      unsigned i, j;
8224  
8225      assert(host_size >= target_size);
8226  
8227      target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8228      if (!target_mask) {
8229          return -TARGET_EFAULT;
8230      }
8231  
8232      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8233          unsigned bit = i * target_bits;
8234          abi_ulong val = 0;
8235  
8236          for (j = 0; j < target_bits; j++, bit++) {
8237              if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8238                  val |= 1UL << j;
8239              }
8240          }
8241          __put_user(val, &target_mask[i]);
8242      }
8243  
8244      unlock_user(target_mask, target_addr, target_size);
8245      return 0;
8246  }
8247  
8248  /* This is an internal helper for do_syscall so that it is easier
8249   * to have a single return point, so that actions, such as logging
8250   * of syscall results, can be performed.
8251   * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8252   */
8253  static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8254                              abi_long arg2, abi_long arg3, abi_long arg4,
8255                              abi_long arg5, abi_long arg6, abi_long arg7,
8256                              abi_long arg8)
8257  {
8258      CPUState *cpu = env_cpu(cpu_env);
8259      abi_long ret;
8260  #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8261      || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8262      || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8263      || defined(TARGET_NR_statx)
8264      struct stat st;
8265  #endif
8266  #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8267      || defined(TARGET_NR_fstatfs)
8268      struct statfs stfs;
8269  #endif
8270      void *p;
8271  
8272      switch(num) {
8273      case TARGET_NR_exit:
8274          /* In old applications this may be used to implement _exit(2).
8275             However, in threaded applications it is used for thread termination,
8276             and _exit_group is used for application termination.
8277             Do thread termination if we have more than one thread.  */
8278  
8279          if (block_signals()) {
8280              return -TARGET_ERESTARTSYS;
8281          }
8282  
8283          pthread_mutex_lock(&clone_lock);
8284  
8285          if (CPU_NEXT(first_cpu)) {
8286              TaskState *ts = cpu->opaque;
8287  
8288              object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8289              object_unref(OBJECT(cpu));
8290              /*
8291               * At this point the CPU should be unrealized and removed
8292               * from cpu lists. We can clean-up the rest of the thread
8293               * data without the lock held.
8294               */
8295  
8296              pthread_mutex_unlock(&clone_lock);
8297  
8298              if (ts->child_tidptr) {
8299                  put_user_u32(0, ts->child_tidptr);
8300                  do_sys_futex(g2h(cpu, ts->child_tidptr),
8301                               FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8302              }
8303              thread_cpu = NULL;
8304              g_free(ts);
8305              rcu_unregister_thread();
8306              pthread_exit(NULL);
8307          }
8308  
8309          pthread_mutex_unlock(&clone_lock);
8310          preexit_cleanup(cpu_env, arg1);
8311          _exit(arg1);
8312          return 0; /* avoid warning */
8313      case TARGET_NR_read:
8314          if (arg2 == 0 && arg3 == 0) {
8315              return get_errno(safe_read(arg1, 0, 0));
8316          } else {
8317              if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8318                  return -TARGET_EFAULT;
8319              ret = get_errno(safe_read(arg1, p, arg3));
8320              if (ret >= 0 &&
8321                  fd_trans_host_to_target_data(arg1)) {
8322                  ret = fd_trans_host_to_target_data(arg1)(p, ret);
8323              }
8324              unlock_user(p, arg2, ret);
8325          }
8326          return ret;
8327      case TARGET_NR_write:
8328          if (arg2 == 0 && arg3 == 0) {
8329              return get_errno(safe_write(arg1, 0, 0));
8330          }
8331          if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8332              return -TARGET_EFAULT;
8333          if (fd_trans_target_to_host_data(arg1)) {
8334              void *copy = g_malloc(arg3);
8335              memcpy(copy, p, arg3);
8336              ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8337              if (ret >= 0) {
8338                  ret = get_errno(safe_write(arg1, copy, ret));
8339              }
8340              g_free(copy);
8341          } else {
8342              ret = get_errno(safe_write(arg1, p, arg3));
8343          }
8344          unlock_user(p, arg2, 0);
8345          return ret;
8346  
8347  #ifdef TARGET_NR_open
8348      case TARGET_NR_open:
8349          if (!(p = lock_user_string(arg1)))
8350              return -TARGET_EFAULT;
8351          ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8352                                    target_to_host_bitmask(arg2, fcntl_flags_tbl),
8353                                    arg3));
8354          fd_trans_unregister(ret);
8355          unlock_user(p, arg1, 0);
8356          return ret;
8357  #endif
8358      case TARGET_NR_openat:
8359          if (!(p = lock_user_string(arg2)))
8360              return -TARGET_EFAULT;
8361          ret = get_errno(do_openat(cpu_env, arg1, p,
8362                                    target_to_host_bitmask(arg3, fcntl_flags_tbl),
8363                                    arg4));
8364          fd_trans_unregister(ret);
8365          unlock_user(p, arg2, 0);
8366          return ret;
8367  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8368      case TARGET_NR_name_to_handle_at:
8369          ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8370          return ret;
8371  #endif
8372  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8373      case TARGET_NR_open_by_handle_at:
8374          ret = do_open_by_handle_at(arg1, arg2, arg3);
8375          fd_trans_unregister(ret);
8376          return ret;
8377  #endif
8378      case TARGET_NR_close:
8379          fd_trans_unregister(arg1);
8380          return get_errno(close(arg1));
8381  
8382      case TARGET_NR_brk:
8383          return do_brk(arg1);
8384  #ifdef TARGET_NR_fork
8385      case TARGET_NR_fork:
8386          return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8387  #endif
8388  #ifdef TARGET_NR_waitpid
8389      case TARGET_NR_waitpid:
8390          {
8391              int status;
8392              ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8393              if (!is_error(ret) && arg2 && ret
8394                  && put_user_s32(host_to_target_waitstatus(status), arg2))
8395                  return -TARGET_EFAULT;
8396          }
8397          return ret;
8398  #endif
8399  #ifdef TARGET_NR_waitid
8400      case TARGET_NR_waitid:
8401          {
8402              siginfo_t info;
8403              info.si_pid = 0;
8404              ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8405              if (!is_error(ret) && arg3 && info.si_pid != 0) {
8406                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8407                      return -TARGET_EFAULT;
8408                  host_to_target_siginfo(p, &info);
8409                  unlock_user(p, arg3, sizeof(target_siginfo_t));
8410              }
8411          }
8412          return ret;
8413  #endif
8414  #ifdef TARGET_NR_creat /* not on alpha */
8415      case TARGET_NR_creat:
8416          if (!(p = lock_user_string(arg1)))
8417              return -TARGET_EFAULT;
8418          ret = get_errno(creat(p, arg2));
8419          fd_trans_unregister(ret);
8420          unlock_user(p, arg1, 0);
8421          return ret;
8422  #endif
8423  #ifdef TARGET_NR_link
8424      case TARGET_NR_link:
8425          {
8426              void * p2;
8427              p = lock_user_string(arg1);
8428              p2 = lock_user_string(arg2);
8429              if (!p || !p2)
8430                  ret = -TARGET_EFAULT;
8431              else
8432                  ret = get_errno(link(p, p2));
8433              unlock_user(p2, arg2, 0);
8434              unlock_user(p, arg1, 0);
8435          }
8436          return ret;
8437  #endif
8438  #if defined(TARGET_NR_linkat)
8439      case TARGET_NR_linkat:
8440          {
8441              void * p2 = NULL;
8442              if (!arg2 || !arg4)
8443                  return -TARGET_EFAULT;
8444              p  = lock_user_string(arg2);
8445              p2 = lock_user_string(arg4);
8446              if (!p || !p2)
8447                  ret = -TARGET_EFAULT;
8448              else
8449                  ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8450              unlock_user(p, arg2, 0);
8451              unlock_user(p2, arg4, 0);
8452          }
8453          return ret;
8454  #endif
8455  #ifdef TARGET_NR_unlink
8456      case TARGET_NR_unlink:
8457          if (!(p = lock_user_string(arg1)))
8458              return -TARGET_EFAULT;
8459          ret = get_errno(unlink(p));
8460          unlock_user(p, arg1, 0);
8461          return ret;
8462  #endif
8463  #if defined(TARGET_NR_unlinkat)
8464      case TARGET_NR_unlinkat:
8465          if (!(p = lock_user_string(arg2)))
8466              return -TARGET_EFAULT;
8467          ret = get_errno(unlinkat(arg1, p, arg3));
8468          unlock_user(p, arg2, 0);
8469          return ret;
8470  #endif
8471      case TARGET_NR_execve:
8472          {
8473              char **argp, **envp;
8474              int argc, envc;
8475              abi_ulong gp;
8476              abi_ulong guest_argp;
8477              abi_ulong guest_envp;
8478              abi_ulong addr;
8479              char **q;
8480              int total_size = 0;
8481  
8482              argc = 0;
8483              guest_argp = arg2;
8484              for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8485                  if (get_user_ual(addr, gp))
8486                      return -TARGET_EFAULT;
8487                  if (!addr)
8488                      break;
8489                  argc++;
8490              }
8491              envc = 0;
8492              guest_envp = arg3;
8493              for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8494                  if (get_user_ual(addr, gp))
8495                      return -TARGET_EFAULT;
8496                  if (!addr)
8497                      break;
8498                  envc++;
8499              }
8500  
8501              argp = g_new0(char *, argc + 1);
8502              envp = g_new0(char *, envc + 1);
8503  
8504              for (gp = guest_argp, q = argp; gp;
8505                    gp += sizeof(abi_ulong), q++) {
8506                  if (get_user_ual(addr, gp))
8507                      goto execve_efault;
8508                  if (!addr)
8509                      break;
8510                  if (!(*q = lock_user_string(addr)))
8511                      goto execve_efault;
8512                  total_size += strlen(*q) + 1;
8513              }
8514              *q = NULL;
8515  
8516              for (gp = guest_envp, q = envp; gp;
8517                    gp += sizeof(abi_ulong), q++) {
8518                  if (get_user_ual(addr, gp))
8519                      goto execve_efault;
8520                  if (!addr)
8521                      break;
8522                  if (!(*q = lock_user_string(addr)))
8523                      goto execve_efault;
8524                  total_size += strlen(*q) + 1;
8525              }
8526              *q = NULL;
8527  
8528              if (!(p = lock_user_string(arg1)))
8529                  goto execve_efault;
8530              /* Although execve() is not an interruptible syscall it is
8531               * a special case where we must use the safe_syscall wrapper:
8532               * if we allow a signal to happen before we make the host
8533               * syscall then we will 'lose' it, because at the point of
8534               * execve the process leaves QEMU's control. So we use the
8535               * safe syscall wrapper to ensure that we either take the
8536               * signal as a guest signal, or else it does not happen
8537               * before the execve completes and makes it the other
8538               * program's problem.
8539               */
8540              ret = get_errno(safe_execve(p, argp, envp));
8541              unlock_user(p, arg1, 0);
8542  
8543              goto execve_end;
8544  
8545          execve_efault:
8546              ret = -TARGET_EFAULT;
8547  
8548          execve_end:
8549              for (gp = guest_argp, q = argp; *q;
8550                    gp += sizeof(abi_ulong), q++) {
8551                  if (get_user_ual(addr, gp)
8552                      || !addr)
8553                      break;
8554                  unlock_user(*q, addr, 0);
8555              }
8556              for (gp = guest_envp, q = envp; *q;
8557                    gp += sizeof(abi_ulong), q++) {
8558                  if (get_user_ual(addr, gp)
8559                      || !addr)
8560                      break;
8561                  unlock_user(*q, addr, 0);
8562              }
8563  
8564              g_free(argp);
8565              g_free(envp);
8566          }
8567          return ret;
8568      case TARGET_NR_chdir:
8569          if (!(p = lock_user_string(arg1)))
8570              return -TARGET_EFAULT;
8571          ret = get_errno(chdir(p));
8572          unlock_user(p, arg1, 0);
8573          return ret;
8574  #ifdef TARGET_NR_time
8575      case TARGET_NR_time:
8576          {
8577              time_t host_time;
8578              ret = get_errno(time(&host_time));
8579              if (!is_error(ret)
8580                  && arg1
8581                  && put_user_sal(host_time, arg1))
8582                  return -TARGET_EFAULT;
8583          }
8584          return ret;
8585  #endif
8586  #ifdef TARGET_NR_mknod
8587      case TARGET_NR_mknod:
8588          if (!(p = lock_user_string(arg1)))
8589              return -TARGET_EFAULT;
8590          ret = get_errno(mknod(p, arg2, arg3));
8591          unlock_user(p, arg1, 0);
8592          return ret;
8593  #endif
8594  #if defined(TARGET_NR_mknodat)
8595      case TARGET_NR_mknodat:
8596          if (!(p = lock_user_string(arg2)))
8597              return -TARGET_EFAULT;
8598          ret = get_errno(mknodat(arg1, p, arg3, arg4));
8599          unlock_user(p, arg2, 0);
8600          return ret;
8601  #endif
8602  #ifdef TARGET_NR_chmod
8603      case TARGET_NR_chmod:
8604          if (!(p = lock_user_string(arg1)))
8605              return -TARGET_EFAULT;
8606          ret = get_errno(chmod(p, arg2));
8607          unlock_user(p, arg1, 0);
8608          return ret;
8609  #endif
8610  #ifdef TARGET_NR_lseek
8611      case TARGET_NR_lseek:
8612          return get_errno(lseek(arg1, arg2, arg3));
8613  #endif
8614  #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8615      /* Alpha specific */
8616      case TARGET_NR_getxpid:
8617          ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8618          return get_errno(getpid());
8619  #endif
8620  #ifdef TARGET_NR_getpid
8621      case TARGET_NR_getpid:
8622          return get_errno(getpid());
8623  #endif
8624      case TARGET_NR_mount:
8625          {
8626              /* need to look at the data field */
8627              void *p2, *p3;
8628  
8629              if (arg1) {
8630                  p = lock_user_string(arg1);
8631                  if (!p) {
8632                      return -TARGET_EFAULT;
8633                  }
8634              } else {
8635                  p = NULL;
8636              }
8637  
8638              p2 = lock_user_string(arg2);
8639              if (!p2) {
8640                  if (arg1) {
8641                      unlock_user(p, arg1, 0);
8642                  }
8643                  return -TARGET_EFAULT;
8644              }
8645  
8646              if (arg3) {
8647                  p3 = lock_user_string(arg3);
8648                  if (!p3) {
8649                      if (arg1) {
8650                          unlock_user(p, arg1, 0);
8651                      }
8652                      unlock_user(p2, arg2, 0);
8653                      return -TARGET_EFAULT;
8654                  }
8655              } else {
8656                  p3 = NULL;
8657              }
8658  
8659              /* FIXME - arg5 should be locked, but it isn't clear how to
8660               * do that since it's not guaranteed to be a NULL-terminated
8661               * string.
8662               */
8663              if (!arg5) {
8664                  ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8665              } else {
8666                  ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8667              }
8668              ret = get_errno(ret);
8669  
8670              if (arg1) {
8671                  unlock_user(p, arg1, 0);
8672              }
8673              unlock_user(p2, arg2, 0);
8674              if (arg3) {
8675                  unlock_user(p3, arg3, 0);
8676              }
8677          }
8678          return ret;
8679  #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8680  #if defined(TARGET_NR_umount)
8681      case TARGET_NR_umount:
8682  #endif
8683  #if defined(TARGET_NR_oldumount)
8684      case TARGET_NR_oldumount:
8685  #endif
8686          if (!(p = lock_user_string(arg1)))
8687              return -TARGET_EFAULT;
8688          ret = get_errno(umount(p));
8689          unlock_user(p, arg1, 0);
8690          return ret;
8691  #endif
8692  #ifdef TARGET_NR_stime /* not on alpha */
8693      case TARGET_NR_stime:
8694          {
8695              struct timespec ts;
8696              ts.tv_nsec = 0;
8697              if (get_user_sal(ts.tv_sec, arg1)) {
8698                  return -TARGET_EFAULT;
8699              }
8700              return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8701          }
8702  #endif
8703  #ifdef TARGET_NR_alarm /* not on alpha */
8704      case TARGET_NR_alarm:
8705          return alarm(arg1);
8706  #endif
8707  #ifdef TARGET_NR_pause /* not on alpha */
8708      case TARGET_NR_pause:
8709          if (!block_signals()) {
8710              sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8711          }
8712          return -TARGET_EINTR;
8713  #endif
8714  #ifdef TARGET_NR_utime
8715      case TARGET_NR_utime:
8716          {
8717              struct utimbuf tbuf, *host_tbuf;
8718              struct target_utimbuf *target_tbuf;
8719              if (arg2) {
8720                  if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8721                      return -TARGET_EFAULT;
8722                  tbuf.actime = tswapal(target_tbuf->actime);
8723                  tbuf.modtime = tswapal(target_tbuf->modtime);
8724                  unlock_user_struct(target_tbuf, arg2, 0);
8725                  host_tbuf = &tbuf;
8726              } else {
8727                  host_tbuf = NULL;
8728              }
8729              if (!(p = lock_user_string(arg1)))
8730                  return -TARGET_EFAULT;
8731              ret = get_errno(utime(p, host_tbuf));
8732              unlock_user(p, arg1, 0);
8733          }
8734          return ret;
8735  #endif
8736  #ifdef TARGET_NR_utimes
8737      case TARGET_NR_utimes:
8738          {
8739              struct timeval *tvp, tv[2];
8740              if (arg2) {
8741                  if (copy_from_user_timeval(&tv[0], arg2)
8742                      || copy_from_user_timeval(&tv[1],
8743                                                arg2 + sizeof(struct target_timeval)))
8744                      return -TARGET_EFAULT;
8745                  tvp = tv;
8746              } else {
8747                  tvp = NULL;
8748              }
8749              if (!(p = lock_user_string(arg1)))
8750                  return -TARGET_EFAULT;
8751              ret = get_errno(utimes(p, tvp));
8752              unlock_user(p, arg1, 0);
8753          }
8754          return ret;
8755  #endif
8756  #if defined(TARGET_NR_futimesat)
8757      case TARGET_NR_futimesat:
8758          {
8759              struct timeval *tvp, tv[2];
8760              if (arg3) {
8761                  if (copy_from_user_timeval(&tv[0], arg3)
8762                      || copy_from_user_timeval(&tv[1],
8763                                                arg3 + sizeof(struct target_timeval)))
8764                      return -TARGET_EFAULT;
8765                  tvp = tv;
8766              } else {
8767                  tvp = NULL;
8768              }
8769              if (!(p = lock_user_string(arg2))) {
8770                  return -TARGET_EFAULT;
8771              }
8772              ret = get_errno(futimesat(arg1, path(p), tvp));
8773              unlock_user(p, arg2, 0);
8774          }
8775          return ret;
8776  #endif
8777  #ifdef TARGET_NR_access
8778      case TARGET_NR_access:
8779          if (!(p = lock_user_string(arg1))) {
8780              return -TARGET_EFAULT;
8781          }
8782          ret = get_errno(access(path(p), arg2));
8783          unlock_user(p, arg1, 0);
8784          return ret;
8785  #endif
8786  #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8787      case TARGET_NR_faccessat:
8788          if (!(p = lock_user_string(arg2))) {
8789              return -TARGET_EFAULT;
8790          }
8791          ret = get_errno(faccessat(arg1, p, arg3, 0));
8792          unlock_user(p, arg2, 0);
8793          return ret;
8794  #endif
8795  #ifdef TARGET_NR_nice /* not on alpha */
8796      case TARGET_NR_nice:
8797          return get_errno(nice(arg1));
8798  #endif
8799      case TARGET_NR_sync:
8800          sync();
8801          return 0;
8802  #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8803      case TARGET_NR_syncfs:
8804          return get_errno(syncfs(arg1));
8805  #endif
8806      case TARGET_NR_kill:
8807          return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8808  #ifdef TARGET_NR_rename
8809      case TARGET_NR_rename:
8810          {
8811              void *p2;
8812              p = lock_user_string(arg1);
8813              p2 = lock_user_string(arg2);
8814              if (!p || !p2)
8815                  ret = -TARGET_EFAULT;
8816              else
8817                  ret = get_errno(rename(p, p2));
8818              unlock_user(p2, arg2, 0);
8819              unlock_user(p, arg1, 0);
8820          }
8821          return ret;
8822  #endif
8823  #if defined(TARGET_NR_renameat)
8824      case TARGET_NR_renameat:
8825          {
8826              void *p2;
8827              p  = lock_user_string(arg2);
8828              p2 = lock_user_string(arg4);
8829              if (!p || !p2)
8830                  ret = -TARGET_EFAULT;
8831              else
8832                  ret = get_errno(renameat(arg1, p, arg3, p2));
8833              unlock_user(p2, arg4, 0);
8834              unlock_user(p, arg2, 0);
8835          }
8836          return ret;
8837  #endif
8838  #if defined(TARGET_NR_renameat2)
8839      case TARGET_NR_renameat2:
8840          {
8841              void *p2;
8842              p  = lock_user_string(arg2);
8843              p2 = lock_user_string(arg4);
8844              if (!p || !p2) {
8845                  ret = -TARGET_EFAULT;
8846              } else {
8847                  ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8848              }
8849              unlock_user(p2, arg4, 0);
8850              unlock_user(p, arg2, 0);
8851          }
8852          return ret;
8853  #endif
8854  #ifdef TARGET_NR_mkdir
8855      case TARGET_NR_mkdir:
8856          if (!(p = lock_user_string(arg1)))
8857              return -TARGET_EFAULT;
8858          ret = get_errno(mkdir(p, arg2));
8859          unlock_user(p, arg1, 0);
8860          return ret;
8861  #endif
8862  #if defined(TARGET_NR_mkdirat)
8863      case TARGET_NR_mkdirat:
8864          if (!(p = lock_user_string(arg2)))
8865              return -TARGET_EFAULT;
8866          ret = get_errno(mkdirat(arg1, p, arg3));
8867          unlock_user(p, arg2, 0);
8868          return ret;
8869  #endif
8870  #ifdef TARGET_NR_rmdir
8871      case TARGET_NR_rmdir:
8872          if (!(p = lock_user_string(arg1)))
8873              return -TARGET_EFAULT;
8874          ret = get_errno(rmdir(p));
8875          unlock_user(p, arg1, 0);
8876          return ret;
8877  #endif
8878      case TARGET_NR_dup:
8879          ret = get_errno(dup(arg1));
8880          if (ret >= 0) {
8881              fd_trans_dup(arg1, ret);
8882          }
8883          return ret;
8884  #ifdef TARGET_NR_pipe
8885      case TARGET_NR_pipe:
8886          return do_pipe(cpu_env, arg1, 0, 0);
8887  #endif
8888  #ifdef TARGET_NR_pipe2
8889      case TARGET_NR_pipe2:
8890          return do_pipe(cpu_env, arg1,
8891                         target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8892  #endif
8893      case TARGET_NR_times:
8894          {
8895              struct target_tms *tmsp;
8896              struct tms tms;
8897              ret = get_errno(times(&tms));
8898              if (arg1) {
8899                  tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8900                  if (!tmsp)
8901                      return -TARGET_EFAULT;
8902                  tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8903                  tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8904                  tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8905                  tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8906              }
8907              if (!is_error(ret))
8908                  ret = host_to_target_clock_t(ret);
8909          }
8910          return ret;
8911      case TARGET_NR_acct:
8912          if (arg1 == 0) {
8913              ret = get_errno(acct(NULL));
8914          } else {
8915              if (!(p = lock_user_string(arg1))) {
8916                  return -TARGET_EFAULT;
8917              }
8918              ret = get_errno(acct(path(p)));
8919              unlock_user(p, arg1, 0);
8920          }
8921          return ret;
8922  #ifdef TARGET_NR_umount2
8923      case TARGET_NR_umount2:
8924          if (!(p = lock_user_string(arg1)))
8925              return -TARGET_EFAULT;
8926          ret = get_errno(umount2(p, arg2));
8927          unlock_user(p, arg1, 0);
8928          return ret;
8929  #endif
8930      case TARGET_NR_ioctl:
8931          return do_ioctl(arg1, arg2, arg3);
8932  #ifdef TARGET_NR_fcntl
8933      case TARGET_NR_fcntl:
8934          return do_fcntl(arg1, arg2, arg3);
8935  #endif
8936      case TARGET_NR_setpgid:
8937          return get_errno(setpgid(arg1, arg2));
8938      case TARGET_NR_umask:
8939          return get_errno(umask(arg1));
8940      case TARGET_NR_chroot:
8941          if (!(p = lock_user_string(arg1)))
8942              return -TARGET_EFAULT;
8943          ret = get_errno(chroot(p));
8944          unlock_user(p, arg1, 0);
8945          return ret;
8946  #ifdef TARGET_NR_dup2
8947      case TARGET_NR_dup2:
8948          ret = get_errno(dup2(arg1, arg2));
8949          if (ret >= 0) {
8950              fd_trans_dup(arg1, arg2);
8951          }
8952          return ret;
8953  #endif
8954  #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8955      case TARGET_NR_dup3:
8956      {
8957          int host_flags;
8958  
8959          if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8960              return -TARGET_EINVAL;
8961          }
8962          host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8963          ret = get_errno(dup3(arg1, arg2, host_flags));
8964          if (ret >= 0) {
8965              fd_trans_dup(arg1, arg2);
8966          }
8967          return ret;
8968      }
8969  #endif
8970  #ifdef TARGET_NR_getppid /* not on alpha */
8971      case TARGET_NR_getppid:
8972          return get_errno(getppid());
8973  #endif
8974  #ifdef TARGET_NR_getpgrp
8975      case TARGET_NR_getpgrp:
8976          return get_errno(getpgrp());
8977  #endif
8978      case TARGET_NR_setsid:
8979          return get_errno(setsid());
8980  #ifdef TARGET_NR_sigaction
8981      case TARGET_NR_sigaction:
8982          {
8983  #if defined(TARGET_MIPS)
8984              struct target_sigaction act, oact, *pact, *old_act;
8985  
8986              if (arg2) {
8987                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8988                      return -TARGET_EFAULT;
8989                  act._sa_handler = old_act->_sa_handler;
8990                  target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8991                  act.sa_flags = old_act->sa_flags;
8992                  unlock_user_struct(old_act, arg2, 0);
8993                  pact = &act;
8994              } else {
8995                  pact = NULL;
8996              }
8997  
8998              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8999  
9000              if (!is_error(ret) && arg3) {
9001                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9002                      return -TARGET_EFAULT;
9003                  old_act->_sa_handler = oact._sa_handler;
9004                  old_act->sa_flags = oact.sa_flags;
9005                  old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9006                  old_act->sa_mask.sig[1] = 0;
9007                  old_act->sa_mask.sig[2] = 0;
9008                  old_act->sa_mask.sig[3] = 0;
9009                  unlock_user_struct(old_act, arg3, 1);
9010              }
9011  #else
9012              struct target_old_sigaction *old_act;
9013              struct target_sigaction act, oact, *pact;
9014              if (arg2) {
9015                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9016                      return -TARGET_EFAULT;
9017                  act._sa_handler = old_act->_sa_handler;
9018                  target_siginitset(&act.sa_mask, old_act->sa_mask);
9019                  act.sa_flags = old_act->sa_flags;
9020  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9021                  act.sa_restorer = old_act->sa_restorer;
9022  #endif
9023                  unlock_user_struct(old_act, arg2, 0);
9024                  pact = &act;
9025              } else {
9026                  pact = NULL;
9027              }
9028              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9029              if (!is_error(ret) && arg3) {
9030                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9031                      return -TARGET_EFAULT;
9032                  old_act->_sa_handler = oact._sa_handler;
9033                  old_act->sa_mask = oact.sa_mask.sig[0];
9034                  old_act->sa_flags = oact.sa_flags;
9035  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9036                  old_act->sa_restorer = oact.sa_restorer;
9037  #endif
9038                  unlock_user_struct(old_act, arg3, 1);
9039              }
9040  #endif
9041          }
9042          return ret;
9043  #endif
9044      case TARGET_NR_rt_sigaction:
9045          {
9046              /*
9047               * For Alpha and SPARC this is a 5 argument syscall, with
9048               * a 'restorer' parameter which must be copied into the
9049               * sa_restorer field of the sigaction struct.
9050               * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9051               * and arg5 is the sigsetsize.
9052               */
9053  #if defined(TARGET_ALPHA)
9054              target_ulong sigsetsize = arg4;
9055              target_ulong restorer = arg5;
9056  #elif defined(TARGET_SPARC)
9057              target_ulong restorer = arg4;
9058              target_ulong sigsetsize = arg5;
9059  #else
9060              target_ulong sigsetsize = arg4;
9061              target_ulong restorer = 0;
9062  #endif
9063              struct target_sigaction *act = NULL;
9064              struct target_sigaction *oact = NULL;
9065  
9066              if (sigsetsize != sizeof(target_sigset_t)) {
9067                  return -TARGET_EINVAL;
9068              }
9069              if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9070                  return -TARGET_EFAULT;
9071              }
9072              if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9073                  ret = -TARGET_EFAULT;
9074              } else {
9075                  ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9076                  if (oact) {
9077                      unlock_user_struct(oact, arg3, 1);
9078                  }
9079              }
9080              if (act) {
9081                  unlock_user_struct(act, arg2, 0);
9082              }
9083          }
9084          return ret;
9085  #ifdef TARGET_NR_sgetmask /* not on alpha */
9086      case TARGET_NR_sgetmask:
9087          {
9088              sigset_t cur_set;
9089              abi_ulong target_set;
9090              ret = do_sigprocmask(0, NULL, &cur_set);
9091              if (!ret) {
9092                  host_to_target_old_sigset(&target_set, &cur_set);
9093                  ret = target_set;
9094              }
9095          }
9096          return ret;
9097  #endif
9098  #ifdef TARGET_NR_ssetmask /* not on alpha */
9099      case TARGET_NR_ssetmask:
9100          {
9101              sigset_t set, oset;
9102              abi_ulong target_set = arg1;
9103              target_to_host_old_sigset(&set, &target_set);
9104              ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9105              if (!ret) {
9106                  host_to_target_old_sigset(&target_set, &oset);
9107                  ret = target_set;
9108              }
9109          }
9110          return ret;
9111  #endif
9112  #ifdef TARGET_NR_sigprocmask
9113      case TARGET_NR_sigprocmask:
9114          {
9115  #if defined(TARGET_ALPHA)
9116              sigset_t set, oldset;
9117              abi_ulong mask;
9118              int how;
9119  
9120              switch (arg1) {
9121              case TARGET_SIG_BLOCK:
9122                  how = SIG_BLOCK;
9123                  break;
9124              case TARGET_SIG_UNBLOCK:
9125                  how = SIG_UNBLOCK;
9126                  break;
9127              case TARGET_SIG_SETMASK:
9128                  how = SIG_SETMASK;
9129                  break;
9130              default:
9131                  return -TARGET_EINVAL;
9132              }
9133              mask = arg2;
9134              target_to_host_old_sigset(&set, &mask);
9135  
9136              ret = do_sigprocmask(how, &set, &oldset);
9137              if (!is_error(ret)) {
9138                  host_to_target_old_sigset(&mask, &oldset);
9139                  ret = mask;
9140                  ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9141              }
9142  #else
9143              sigset_t set, oldset, *set_ptr;
9144              int how;
9145  
9146              if (arg2) {
9147                  switch (arg1) {
9148                  case TARGET_SIG_BLOCK:
9149                      how = SIG_BLOCK;
9150                      break;
9151                  case TARGET_SIG_UNBLOCK:
9152                      how = SIG_UNBLOCK;
9153                      break;
9154                  case TARGET_SIG_SETMASK:
9155                      how = SIG_SETMASK;
9156                      break;
9157                  default:
9158                      return -TARGET_EINVAL;
9159                  }
9160                  if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9161                      return -TARGET_EFAULT;
9162                  target_to_host_old_sigset(&set, p);
9163                  unlock_user(p, arg2, 0);
9164                  set_ptr = &set;
9165              } else {
9166                  how = 0;
9167                  set_ptr = NULL;
9168              }
9169              ret = do_sigprocmask(how, set_ptr, &oldset);
9170              if (!is_error(ret) && arg3) {
9171                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9172                      return -TARGET_EFAULT;
9173                  host_to_target_old_sigset(p, &oldset);
9174                  unlock_user(p, arg3, sizeof(target_sigset_t));
9175              }
9176  #endif
9177          }
9178          return ret;
9179  #endif
9180      case TARGET_NR_rt_sigprocmask:
9181          {
9182              int how = arg1;
9183              sigset_t set, oldset, *set_ptr;
9184  
9185              if (arg4 != sizeof(target_sigset_t)) {
9186                  return -TARGET_EINVAL;
9187              }
9188  
9189              if (arg2) {
9190                  switch(how) {
9191                  case TARGET_SIG_BLOCK:
9192                      how = SIG_BLOCK;
9193                      break;
9194                  case TARGET_SIG_UNBLOCK:
9195                      how = SIG_UNBLOCK;
9196                      break;
9197                  case TARGET_SIG_SETMASK:
9198                      how = SIG_SETMASK;
9199                      break;
9200                  default:
9201                      return -TARGET_EINVAL;
9202                  }
9203                  if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9204                      return -TARGET_EFAULT;
9205                  target_to_host_sigset(&set, p);
9206                  unlock_user(p, arg2, 0);
9207                  set_ptr = &set;
9208              } else {
9209                  how = 0;
9210                  set_ptr = NULL;
9211              }
9212              ret = do_sigprocmask(how, set_ptr, &oldset);
9213              if (!is_error(ret) && arg3) {
9214                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9215                      return -TARGET_EFAULT;
9216                  host_to_target_sigset(p, &oldset);
9217                  unlock_user(p, arg3, sizeof(target_sigset_t));
9218              }
9219          }
9220          return ret;
9221  #ifdef TARGET_NR_sigpending
9222      case TARGET_NR_sigpending:
9223          {
9224              sigset_t set;
9225              ret = get_errno(sigpending(&set));
9226              if (!is_error(ret)) {
9227                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9228                      return -TARGET_EFAULT;
9229                  host_to_target_old_sigset(p, &set);
9230                  unlock_user(p, arg1, sizeof(target_sigset_t));
9231              }
9232          }
9233          return ret;
9234  #endif
9235      case TARGET_NR_rt_sigpending:
9236          {
9237              sigset_t set;
9238  
9239              /* Yes, this check is >, not != like most others. We follow the
9240               * kernel's logic here: it accepts smaller sizes because
9241               * NR_sigpending is implemented through the same code path,
9242               * and in that case the old_sigset_t is smaller in size.
9243               */
9244              if (arg2 > sizeof(target_sigset_t)) {
9245                  return -TARGET_EINVAL;
9246              }
9247  
9248              ret = get_errno(sigpending(&set));
9249              if (!is_error(ret)) {
9250                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9251                      return -TARGET_EFAULT;
9252                  host_to_target_sigset(p, &set);
9253                  unlock_user(p, arg1, sizeof(target_sigset_t));
9254              }
9255          }
9256          return ret;
9257  #ifdef TARGET_NR_sigsuspend
9258      case TARGET_NR_sigsuspend:
9259          {
9260              TaskState *ts = cpu->opaque;
9261  #if defined(TARGET_ALPHA)
9262              abi_ulong mask = arg1;
9263              target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9264  #else
9265              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9266                  return -TARGET_EFAULT;
9267              target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9268              unlock_user(p, arg1, 0);
9269  #endif
9270              ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9271                                                 SIGSET_T_SIZE));
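            /*
             * Unless the syscall is going to be restarted, flag that we are
             * inside sigsuspend so the signal delivery code can restore the
             * mask saved in ts->sigsuspend_mask once a pending signal has
             * been handled.
             */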
9272              if (ret != -TARGET_ERESTARTSYS) {
9273                  ts->in_sigsuspend = 1;
9274              }
9275          }
9276          return ret;
9277  #endif
9278      case TARGET_NR_rt_sigsuspend:
9279          {
9280              TaskState *ts = cpu->opaque;
9281  
9282              if (arg2 != sizeof(target_sigset_t)) {
9283                  return -TARGET_EINVAL;
9284              }
9285              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9286                  return -TARGET_EFAULT;
9287              target_to_host_sigset(&ts->sigsuspend_mask, p);
9288              unlock_user(p, arg1, 0);
9289              ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9290                                                 SIGSET_T_SIZE));
9291              if (ret != -TARGET_ERESTARTSYS) {
9292                  ts->in_sigsuspend = 1;
9293              }
9294          }
9295          return ret;
9296  #ifdef TARGET_NR_rt_sigtimedwait
9297      case TARGET_NR_rt_sigtimedwait:
9298          {
9299              sigset_t set;
9300              struct timespec uts, *puts;
9301              siginfo_t uinfo;
9302  
9303              if (arg4 != sizeof(target_sigset_t)) {
9304                  return -TARGET_EINVAL;
9305              }
9306  
9307              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9308                  return -TARGET_EFAULT;
9309              target_to_host_sigset(&set, p);
9310              unlock_user(p, arg1, 0);
9311              if (arg3) {
9312                  puts = &uts;
9313                  if (target_to_host_timespec(puts, arg3)) {
9314                      return -TARGET_EFAULT;
9315                  }
9316              } else {
9317                  puts = NULL;
9318              }
9319              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9320                                                   SIGSET_T_SIZE));
9321              if (!is_error(ret)) {
9322                  if (arg2) {
9323                      p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9324                                    0);
9325                      if (!p) {
9326                          return -TARGET_EFAULT;
9327                      }
9328                      host_to_target_siginfo(p, &uinfo);
9329                      unlock_user(p, arg2, sizeof(target_siginfo_t));
9330                  }
9331                  ret = host_to_target_signal(ret);
9332              }
9333          }
9334          return ret;
9335  #endif
9336  #ifdef TARGET_NR_rt_sigtimedwait_time64
9337      case TARGET_NR_rt_sigtimedwait_time64:
9338          {
9339              sigset_t set;
9340              struct timespec uts, *puts;
9341              siginfo_t uinfo;
9342  
9343              if (arg4 != sizeof(target_sigset_t)) {
9344                  return -TARGET_EINVAL;
9345              }
9346  
9347              p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9348              if (!p) {
9349                  return -TARGET_EFAULT;
9350              }
9351              target_to_host_sigset(&set, p);
9352              unlock_user(p, arg1, 0);
9353              if (arg3) {
9354                  puts = &uts;
9355                  if (target_to_host_timespec64(puts, arg3)) {
9356                      return -TARGET_EFAULT;
9357                  }
9358              } else {
9359                  puts = NULL;
9360              }
9361              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9362                                                   SIGSET_T_SIZE));
9363              if (!is_error(ret)) {
9364                  if (arg2) {
9365                      p = lock_user(VERIFY_WRITE, arg2,
9366                                    sizeof(target_siginfo_t), 0);
9367                      if (!p) {
9368                          return -TARGET_EFAULT;
9369                      }
9370                      host_to_target_siginfo(p, &uinfo);
9371                      unlock_user(p, arg2, sizeof(target_siginfo_t));
9372                  }
9373                  ret = host_to_target_signal(ret);
9374              }
9375          }
9376          return ret;
9377  #endif
9378      case TARGET_NR_rt_sigqueueinfo:
9379          {
9380              siginfo_t uinfo;
9381  
9382              p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9383              if (!p) {
9384                  return -TARGET_EFAULT;
9385              }
9386              target_to_host_siginfo(&uinfo, p);
9387              unlock_user(p, arg3, 0);
9388              ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9389          }
9390          return ret;
9391      case TARGET_NR_rt_tgsigqueueinfo:
9392          {
9393              siginfo_t uinfo;
9394  
9395              p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9396              if (!p) {
9397                  return -TARGET_EFAULT;
9398              }
9399              target_to_host_siginfo(&uinfo, p);
9400              unlock_user(p, arg4, 0);
9401              ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9402          }
9403          return ret;
9404  #ifdef TARGET_NR_sigreturn
9405      case TARGET_NR_sigreturn:
9406          if (block_signals()) {
9407              return -TARGET_ERESTARTSYS;
9408          }
9409          return do_sigreturn(cpu_env);
9410  #endif
9411      case TARGET_NR_rt_sigreturn:
9412          if (block_signals()) {
9413              return -TARGET_ERESTARTSYS;
9414          }
9415          return do_rt_sigreturn(cpu_env);
9416      case TARGET_NR_sethostname:
9417          if (!(p = lock_user_string(arg1)))
9418              return -TARGET_EFAULT;
9419          ret = get_errno(sethostname(p, arg2));
9420          unlock_user(p, arg1, 0);
9421          return ret;
9422  #ifdef TARGET_NR_setrlimit
9423      case TARGET_NR_setrlimit:
9424          {
9425              int resource = target_to_host_resource(arg1);
9426              struct target_rlimit *target_rlim;
9427              struct rlimit rlim;
9428              if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9429                  return -TARGET_EFAULT;
9430              rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9431              rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9432              unlock_user_struct(target_rlim, arg2, 0);
9433              /*
9434               * If we just passed through resource limit settings for memory then
9435               * they would also apply to QEMU's own allocations, and QEMU will
9436               * crash or hang or die if its allocations fail. Ideally we would
9437               * track the guest allocations in QEMU and apply the limits ourselves.
9438               * For now, just tell the guest the call succeeded but don't actually
9439               * limit anything.
9440               */
9441              if (resource != RLIMIT_AS &&
9442                  resource != RLIMIT_DATA &&
9443                  resource != RLIMIT_STACK) {
9444                  return get_errno(setrlimit(resource, &rlim));
9445              } else {
9446                  return 0;
9447              }
9448          }
9449  #endif
9450  #ifdef TARGET_NR_getrlimit
9451      case TARGET_NR_getrlimit:
9452          {
9453              int resource = target_to_host_resource(arg1);
9454              struct target_rlimit *target_rlim;
9455              struct rlimit rlim;
9456  
9457              ret = get_errno(getrlimit(resource, &rlim));
9458              if (!is_error(ret)) {
9459                  if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9460                      return -TARGET_EFAULT;
9461                  target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9462                  target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9463                  unlock_user_struct(target_rlim, arg2, 1);
9464              }
9465          }
9466          return ret;
9467  #endif
9468      case TARGET_NR_getrusage:
9469          {
9470              struct rusage rusage;
9471              ret = get_errno(getrusage(arg1, &rusage));
9472              if (!is_error(ret)) {
9473                  ret = host_to_target_rusage(arg2, &rusage);
9474              }
9475          }
9476          return ret;
9477  #if defined(TARGET_NR_gettimeofday)
9478      case TARGET_NR_gettimeofday:
9479          {
9480              struct timeval tv;
9481              struct timezone tz;
9482  
9483              ret = get_errno(gettimeofday(&tv, &tz));
9484              if (!is_error(ret)) {
9485                  if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9486                      return -TARGET_EFAULT;
9487                  }
9488                  if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9489                      return -TARGET_EFAULT;
9490                  }
9491              }
9492          }
9493          return ret;
9494  #endif
9495  #if defined(TARGET_NR_settimeofday)
9496      case TARGET_NR_settimeofday:
9497          {
9498              struct timeval tv, *ptv = NULL;
9499              struct timezone tz, *ptz = NULL;
9500  
9501              if (arg1) {
9502                  if (copy_from_user_timeval(&tv, arg1)) {
9503                      return -TARGET_EFAULT;
9504                  }
9505                  ptv = &tv;
9506              }
9507  
9508              if (arg2) {
9509                  if (copy_from_user_timezone(&tz, arg2)) {
9510                      return -TARGET_EFAULT;
9511                  }
9512                  ptz = &tz;
9513              }
9514  
9515              return get_errno(settimeofday(ptv, ptz));
9516          }
9517  #endif
9518  #if defined(TARGET_NR_select)
9519      case TARGET_NR_select:
9520  #if defined(TARGET_WANT_NI_OLD_SELECT)
9521          /* Some architectures used to implement old_select here,
9522           * but they now simply return ENOSYS for it.
9523           */
9524          ret = -TARGET_ENOSYS;
9525  #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9526          ret = do_old_select(arg1);
9527  #else
9528          ret = do_select(arg1, arg2, arg3, arg4, arg5);
9529  #endif
9530          return ret;
9531  #endif
9532  #ifdef TARGET_NR_pselect6
9533      case TARGET_NR_pselect6:
9534          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9535  #endif
9536  #ifdef TARGET_NR_pselect6_time64
9537      case TARGET_NR_pselect6_time64:
9538          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9539  #endif
9540  #ifdef TARGET_NR_symlink
9541      case TARGET_NR_symlink:
9542          {
9543              void *p2;
9544              p = lock_user_string(arg1);
9545              p2 = lock_user_string(arg2);
9546              if (!p || !p2)
9547                  ret = -TARGET_EFAULT;
9548              else
9549                  ret = get_errno(symlink(p, p2));
9550              unlock_user(p2, arg2, 0);
9551              unlock_user(p, arg1, 0);
9552          }
9553          return ret;
9554  #endif
9555  #if defined(TARGET_NR_symlinkat)
9556      case TARGET_NR_symlinkat:
9557          {
9558              void *p2;
9559              p  = lock_user_string(arg1);
9560              p2 = lock_user_string(arg3);
9561              if (!p || !p2)
9562                  ret = -TARGET_EFAULT;
9563              else
9564                  ret = get_errno(symlinkat(p, arg2, p2));
9565              unlock_user(p2, arg3, 0);
9566              unlock_user(p, arg1, 0);
9567          }
9568          return ret;
9569  #endif
9570  #ifdef TARGET_NR_readlink
9571      case TARGET_NR_readlink:
9572          {
9573              void *p2;
9574              p = lock_user_string(arg1);
9575              p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9576              if (!p || !p2) {
9577                  ret = -TARGET_EFAULT;
9578              } else if (!arg3) {
9579                  /* Short circuit this for the magic exe check. */
9580                  ret = -TARGET_EINVAL;
9581              } else if (is_proc_myself((const char *)p, "exe")) {
9582                  char real[PATH_MAX], *temp;
9583                  temp = realpath(exec_path, real);
9584                  /* Return value is # of bytes that we wrote to the buffer. */
9585                  if (temp == NULL) {
9586                      ret = get_errno(-1);
9587                  } else {
9588                      /* Don't worry about sign mismatch as earlier mapping
9589                       * logic would have thrown a bad address error. */
9590                      ret = MIN(strlen(real), arg3);
9591                      /* We cannot NUL terminate the string. */
9592                      memcpy(p2, real, ret);
9593                  }
9594              } else {
9595                  ret = get_errno(readlink(path(p), p2, arg3));
9596              }
9597              unlock_user(p2, arg2, ret);
9598              unlock_user(p, arg1, 0);
9599          }
9600          return ret;
9601  #endif
9602  #if defined(TARGET_NR_readlinkat)
9603      case TARGET_NR_readlinkat:
9604          {
9605              void *p2;
9606              p  = lock_user_string(arg2);
9607              p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9608              if (!p || !p2) {
9609                  ret = -TARGET_EFAULT;
9610              } else if (is_proc_myself((const char *)p, "exe")) {
9611                  char real[PATH_MAX], *temp;
9612                  temp = realpath(exec_path, real);
9613              ret = temp == NULL ? get_errno(-1) : strlen(real);
9614                  snprintf((char *)p2, arg4, "%s", real);
9615              } else {
9616                  ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9617              }
9618              unlock_user(p2, arg3, ret);
9619              unlock_user(p, arg2, 0);
9620          }
9621          return ret;
9622  #endif
9623  #ifdef TARGET_NR_swapon
9624      case TARGET_NR_swapon:
9625          if (!(p = lock_user_string(arg1)))
9626              return -TARGET_EFAULT;
9627          ret = get_errno(swapon(p, arg2));
9628          unlock_user(p, arg1, 0);
9629          return ret;
9630  #endif
9631      case TARGET_NR_reboot:
9632          if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9633              /* arg4 must be ignored in all other cases */
9634              p = lock_user_string(arg4);
9635              if (!p) {
9636                  return -TARGET_EFAULT;
9637              }
9638              ret = get_errno(reboot(arg1, arg2, arg3, p));
9639              unlock_user(p, arg4, 0);
9640          } else {
9641              ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9642          }
9643          return ret;
9644  #ifdef TARGET_NR_mmap
9645      case TARGET_NR_mmap:
9646  #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9647      (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9648      defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9649      || defined(TARGET_S390X)
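        /*
         * These 32-bit targets use the old mmap calling convention, where
         * arg1 points to a block of six arguments in guest memory rather
         * than passing them in registers.
         */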
9650          {
9651              abi_ulong *v;
9652              abi_ulong v1, v2, v3, v4, v5, v6;
9653              if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9654                  return -TARGET_EFAULT;
9655              v1 = tswapal(v[0]);
9656              v2 = tswapal(v[1]);
9657              v3 = tswapal(v[2]);
9658              v4 = tswapal(v[3]);
9659              v5 = tswapal(v[4]);
9660              v6 = tswapal(v[5]);
9661              unlock_user(v, arg1, 0);
9662              ret = get_errno(target_mmap(v1, v2, v3,
9663                                          target_to_host_bitmask(v4, mmap_flags_tbl),
9664                                          v5, v6));
9665          }
9666  #else
9667          /* mmap pointers are always untagged */
9668          ret = get_errno(target_mmap(arg1, arg2, arg3,
9669                                      target_to_host_bitmask(arg4, mmap_flags_tbl),
9670                                      arg5,
9671                                      arg6));
9672  #endif
9673          return ret;
9674  #endif
9675  #ifdef TARGET_NR_mmap2
9676      case TARGET_NR_mmap2:
9677  #ifndef MMAP_SHIFT
9678  #define MMAP_SHIFT 12
9679  #endif
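        /*
         * For mmap2 the file offset (arg6) is given in units of
         * 1 << MMAP_SHIFT (4096 bytes unless the target defines MMAP_SHIFT
         * differently), so convert it to a byte offset for target_mmap().
         */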
9680          ret = target_mmap(arg1, arg2, arg3,
9681                            target_to_host_bitmask(arg4, mmap_flags_tbl),
9682                            arg5, arg6 << MMAP_SHIFT);
9683          return get_errno(ret);
9684  #endif
9685      case TARGET_NR_munmap:
9686          arg1 = cpu_untagged_addr(cpu, arg1);
9687          return get_errno(target_munmap(arg1, arg2));
9688      case TARGET_NR_mprotect:
9689          arg1 = cpu_untagged_addr(cpu, arg1);
9690          {
9691              TaskState *ts = cpu->opaque;
9692              /* Special hack to detect libc making the stack executable.  */
9693              if ((arg3 & PROT_GROWSDOWN)
9694                  && arg1 >= ts->info->stack_limit
9695                  && arg1 <= ts->info->start_stack) {
9696                  arg3 &= ~PROT_GROWSDOWN;
9697                  arg2 = arg2 + arg1 - ts->info->stack_limit;
9698                  arg1 = ts->info->stack_limit;
9699              }
9700          }
9701          return get_errno(target_mprotect(arg1, arg2, arg3));
9702  #ifdef TARGET_NR_mremap
9703      case TARGET_NR_mremap:
9704          arg1 = cpu_untagged_addr(cpu, arg1);
9705          /* mremap new_addr (arg5) is always untagged */
9706          return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9707  #endif
9708          /* ??? msync/mlock/munlock are broken for softmmu.  */
9709  #ifdef TARGET_NR_msync
9710      case TARGET_NR_msync:
9711          return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9712  #endif
9713  #ifdef TARGET_NR_mlock
9714      case TARGET_NR_mlock:
9715          return get_errno(mlock(g2h(cpu, arg1), arg2));
9716  #endif
9717  #ifdef TARGET_NR_munlock
9718      case TARGET_NR_munlock:
9719          return get_errno(munlock(g2h(cpu, arg1), arg2));
9720  #endif
9721  #ifdef TARGET_NR_mlockall
9722      case TARGET_NR_mlockall:
9723          return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9724  #endif
9725  #ifdef TARGET_NR_munlockall
9726      case TARGET_NR_munlockall:
9727          return get_errno(munlockall());
9728  #endif
9729  #ifdef TARGET_NR_truncate
9730      case TARGET_NR_truncate:
9731          if (!(p = lock_user_string(arg1)))
9732              return -TARGET_EFAULT;
9733          ret = get_errno(truncate(p, arg2));
9734          unlock_user(p, arg1, 0);
9735          return ret;
9736  #endif
9737  #ifdef TARGET_NR_ftruncate
9738      case TARGET_NR_ftruncate:
9739          return get_errno(ftruncate(arg1, arg2));
9740  #endif
9741      case TARGET_NR_fchmod:
9742          return get_errno(fchmod(arg1, arg2));
9743  #if defined(TARGET_NR_fchmodat)
9744      case TARGET_NR_fchmodat:
9745          if (!(p = lock_user_string(arg2)))
9746              return -TARGET_EFAULT;
9747          ret = get_errno(fchmodat(arg1, p, arg3, 0));
9748          unlock_user(p, arg2, 0);
9749          return ret;
9750  #endif
9751      case TARGET_NR_getpriority:
9752          /* Note that negative values are valid for getpriority, so we must
9753             differentiate based on errno settings.  */
9754          errno = 0;
9755          ret = getpriority(arg1, arg2);
9756          if (ret == -1 && errno != 0) {
9757              return -host_to_target_errno(errno);
9758          }
9759  #ifdef TARGET_ALPHA
9760          /* Return value is the unbiased priority.  Signal no error.  */
9761          ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9762  #else
9763          /* Return value is a biased priority to avoid negative numbers.  */
9764          ret = 20 - ret;
9765  #endif
9766          return ret;
9767      case TARGET_NR_setpriority:
9768          return get_errno(setpriority(arg1, arg2, arg3));
9769  #ifdef TARGET_NR_statfs
9770      case TARGET_NR_statfs:
9771          if (!(p = lock_user_string(arg1))) {
9772              return -TARGET_EFAULT;
9773          }
9774          ret = get_errno(statfs(path(p), &stfs));
9775          unlock_user(p, arg1, 0);
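        /*
         * The conversion of the host statfs result to the target layout is
         * shared with TARGET_NR_fstatfs, which jumps here after filling stfs.
         */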
9776      convert_statfs:
9777          if (!is_error(ret)) {
9778              struct target_statfs *target_stfs;
9779  
9780              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9781                  return -TARGET_EFAULT;
9782              __put_user(stfs.f_type, &target_stfs->f_type);
9783              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9784              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9785              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9786              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9787              __put_user(stfs.f_files, &target_stfs->f_files);
9788              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9789              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9790              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9791              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9792              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9793  #ifdef _STATFS_F_FLAGS
9794              __put_user(stfs.f_flags, &target_stfs->f_flags);
9795  #else
9796              __put_user(0, &target_stfs->f_flags);
9797  #endif
9798              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9799              unlock_user_struct(target_stfs, arg2, 1);
9800          }
9801          return ret;
9802  #endif
9803  #ifdef TARGET_NR_fstatfs
9804      case TARGET_NR_fstatfs:
9805          ret = get_errno(fstatfs(arg1, &stfs));
9806          goto convert_statfs;
9807  #endif
9808  #ifdef TARGET_NR_statfs64
9809      case TARGET_NR_statfs64:
9810          if (!(p = lock_user_string(arg1))) {
9811              return -TARGET_EFAULT;
9812          }
9813          ret = get_errno(statfs(path(p), &stfs));
9814          unlock_user(p, arg1, 0);
9815      convert_statfs64:
9816          if (!is_error(ret)) {
9817              struct target_statfs64 *target_stfs;
9818  
9819              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9820                  return -TARGET_EFAULT;
9821              __put_user(stfs.f_type, &target_stfs->f_type);
9822              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9823              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9824              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9825              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9826              __put_user(stfs.f_files, &target_stfs->f_files);
9827              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9828              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9829              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9830              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9831              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9832  #ifdef _STATFS_F_FLAGS
9833              __put_user(stfs.f_flags, &target_stfs->f_flags);
9834  #else
9835              __put_user(0, &target_stfs->f_flags);
9836  #endif
9837              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9838              unlock_user_struct(target_stfs, arg3, 1);
9839          }
9840          return ret;
9841      case TARGET_NR_fstatfs64:
9842          ret = get_errno(fstatfs(arg1, &stfs));
9843          goto convert_statfs64;
9844  #endif
9845  #ifdef TARGET_NR_socketcall
9846      case TARGET_NR_socketcall:
9847          return do_socketcall(arg1, arg2);
9848  #endif
9849  #ifdef TARGET_NR_accept
9850      case TARGET_NR_accept:
9851          return do_accept4(arg1, arg2, arg3, 0);
9852  #endif
9853  #ifdef TARGET_NR_accept4
9854      case TARGET_NR_accept4:
9855          return do_accept4(arg1, arg2, arg3, arg4);
9856  #endif
9857  #ifdef TARGET_NR_bind
9858      case TARGET_NR_bind:
9859          return do_bind(arg1, arg2, arg3);
9860  #endif
9861  #ifdef TARGET_NR_connect
9862      case TARGET_NR_connect:
9863          return do_connect(arg1, arg2, arg3);
9864  #endif
9865  #ifdef TARGET_NR_getpeername
9866      case TARGET_NR_getpeername:
9867          return do_getpeername(arg1, arg2, arg3);
9868  #endif
9869  #ifdef TARGET_NR_getsockname
9870      case TARGET_NR_getsockname:
9871          return do_getsockname(arg1, arg2, arg3);
9872  #endif
9873  #ifdef TARGET_NR_getsockopt
9874      case TARGET_NR_getsockopt:
9875          return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9876  #endif
9877  #ifdef TARGET_NR_listen
9878      case TARGET_NR_listen:
9879          return get_errno(listen(arg1, arg2));
9880  #endif
9881  #ifdef TARGET_NR_recv
9882      case TARGET_NR_recv:
9883          return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9884  #endif
9885  #ifdef TARGET_NR_recvfrom
9886      case TARGET_NR_recvfrom:
9887          return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9888  #endif
9889  #ifdef TARGET_NR_recvmsg
9890      case TARGET_NR_recvmsg:
9891          return do_sendrecvmsg(arg1, arg2, arg3, 0);
9892  #endif
9893  #ifdef TARGET_NR_send
9894      case TARGET_NR_send:
9895          return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9896  #endif
9897  #ifdef TARGET_NR_sendmsg
9898      case TARGET_NR_sendmsg:
9899          return do_sendrecvmsg(arg1, arg2, arg3, 1);
9900  #endif
9901  #ifdef TARGET_NR_sendmmsg
9902      case TARGET_NR_sendmmsg:
9903          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9904  #endif
9905  #ifdef TARGET_NR_recvmmsg
9906      case TARGET_NR_recvmmsg:
9907          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9908  #endif
9909  #ifdef TARGET_NR_sendto
9910      case TARGET_NR_sendto:
9911          return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9912  #endif
9913  #ifdef TARGET_NR_shutdown
9914      case TARGET_NR_shutdown:
9915          return get_errno(shutdown(arg1, arg2));
9916  #endif
9917  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9918      case TARGET_NR_getrandom:
9919          p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9920          if (!p) {
9921              return -TARGET_EFAULT;
9922          }
9923          ret = get_errno(getrandom(p, arg2, arg3));
9924          unlock_user(p, arg1, ret);
9925          return ret;
9926  #endif
9927  #ifdef TARGET_NR_socket
9928      case TARGET_NR_socket:
9929          return do_socket(arg1, arg2, arg3);
9930  #endif
9931  #ifdef TARGET_NR_socketpair
9932      case TARGET_NR_socketpair:
9933          return do_socketpair(arg1, arg2, arg3, arg4);
9934  #endif
9935  #ifdef TARGET_NR_setsockopt
9936      case TARGET_NR_setsockopt:
9937          return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9938  #endif
9939  #if defined(TARGET_NR_syslog)
9940      case TARGET_NR_syslog:
9941          {
9942              int len = arg3;
9943  
9944              switch (arg1) {
9945              case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9946              case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9947              case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9948              case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9949              case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9950              case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9951              case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9952              case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9953                  return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9954              case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9955              case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9956              case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9957                  {
9958                      if (len < 0) {
9959                          return -TARGET_EINVAL;
9960                      }
9961                      if (len == 0) {
9962                          return 0;
9963                      }
9964                      p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9965                      if (!p) {
9966                          return -TARGET_EFAULT;
9967                      }
9968                      ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9969                      unlock_user(p, arg2, arg3);
9970                  }
9971                  return ret;
9972              default:
9973                  return -TARGET_EINVAL;
9974              }
9975          }
9976          break;
9977  #endif
9978      case TARGET_NR_setitimer:
9979          {
9980              struct itimerval value, ovalue, *pvalue;
9981  
9982              if (arg2) {
9983                  pvalue = &value;
9984                  if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9985                      || copy_from_user_timeval(&pvalue->it_value,
9986                                                arg2 + sizeof(struct target_timeval)))
9987                      return -TARGET_EFAULT;
9988              } else {
9989                  pvalue = NULL;
9990              }
9991              ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9992              if (!is_error(ret) && arg3) {
9993                  if (copy_to_user_timeval(arg3,
9994                                           &ovalue.it_interval)
9995                      || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9996                                              &ovalue.it_value))
9997                      return -TARGET_EFAULT;
9998              }
9999          }
10000          return ret;
10001      case TARGET_NR_getitimer:
10002          {
10003              struct itimerval value;
10004  
10005              ret = get_errno(getitimer(arg1, &value));
10006              if (!is_error(ret) && arg2) {
10007                  if (copy_to_user_timeval(arg2,
10008                                           &value.it_interval)
10009                      || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10010                                              &value.it_value))
10011                      return -TARGET_EFAULT;
10012              }
10013          }
10014          return ret;
10015  #ifdef TARGET_NR_stat
10016      case TARGET_NR_stat:
10017          if (!(p = lock_user_string(arg1))) {
10018              return -TARGET_EFAULT;
10019          }
10020          ret = get_errno(stat(path(p), &st));
10021          unlock_user(p, arg1, 0);
10022          goto do_stat;
10023  #endif
10024  #ifdef TARGET_NR_lstat
10025      case TARGET_NR_lstat:
10026          if (!(p = lock_user_string(arg1))) {
10027              return -TARGET_EFAULT;
10028          }
10029          ret = get_errno(lstat(path(p), &st));
10030          unlock_user(p, arg1, 0);
10031          goto do_stat;
10032  #endif
10033  #ifdef TARGET_NR_fstat
10034      case TARGET_NR_fstat:
10035          {
10036              ret = get_errno(fstat(arg1, &st));
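            /*
             * TARGET_NR_stat and TARGET_NR_lstat jump to the do_stat label
             * just below with st already filled in; the conversion to the
             * guest's struct target_stat is shared by all three syscalls.
             */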
10037  #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10038          do_stat:
10039  #endif
10040              if (!is_error(ret)) {
10041                  struct target_stat *target_st;
10042  
10043                  if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10044                      return -TARGET_EFAULT;
10045                  memset(target_st, 0, sizeof(*target_st));
10046                  __put_user(st.st_dev, &target_st->st_dev);
10047                  __put_user(st.st_ino, &target_st->st_ino);
10048                  __put_user(st.st_mode, &target_st->st_mode);
10049                  __put_user(st.st_uid, &target_st->st_uid);
10050                  __put_user(st.st_gid, &target_st->st_gid);
10051                  __put_user(st.st_nlink, &target_st->st_nlink);
10052                  __put_user(st.st_rdev, &target_st->st_rdev);
10053                  __put_user(st.st_size, &target_st->st_size);
10054                  __put_user(st.st_blksize, &target_st->st_blksize);
10055                  __put_user(st.st_blocks, &target_st->st_blocks);
10056                  __put_user(st.st_atime, &target_st->target_st_atime);
10057                  __put_user(st.st_mtime, &target_st->target_st_mtime);
10058                  __put_user(st.st_ctime, &target_st->target_st_ctime);
10059  #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10060      defined(TARGET_STAT_HAVE_NSEC)
10061                  __put_user(st.st_atim.tv_nsec,
10062                             &target_st->target_st_atime_nsec);
10063                  __put_user(st.st_mtim.tv_nsec,
10064                             &target_st->target_st_mtime_nsec);
10065                  __put_user(st.st_ctim.tv_nsec,
10066                             &target_st->target_st_ctime_nsec);
10067  #endif
10068                  unlock_user_struct(target_st, arg2, 1);
10069              }
10070          }
10071          return ret;
10072  #endif
10073      case TARGET_NR_vhangup:
10074          return get_errno(vhangup());
10075  #ifdef TARGET_NR_syscall
10076      case TARGET_NR_syscall:
10077          return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10078                            arg6, arg7, arg8, 0);
10079  #endif
10080  #if defined(TARGET_NR_wait4)
10081      case TARGET_NR_wait4:
10082          {
10083              int status;
10084              abi_long status_ptr = arg2;
10085              struct rusage rusage, *rusage_ptr;
10086              abi_ulong target_rusage = arg4;
10087              abi_long rusage_err;
10088              if (target_rusage)
10089                  rusage_ptr = &rusage;
10090              else
10091                  rusage_ptr = NULL;
10092              ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10093              if (!is_error(ret)) {
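                /*
                 * Only write the status back when a child was actually
                 * reaped: with WNOHANG, wait4() can legitimately return 0.
                 */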
10094                  if (status_ptr && ret) {
10095                      status = host_to_target_waitstatus(status);
10096                      if (put_user_s32(status, status_ptr))
10097                          return -TARGET_EFAULT;
10098                  }
10099                  if (target_rusage) {
10100                      rusage_err = host_to_target_rusage(target_rusage, &rusage);
10101                      if (rusage_err) {
10102                          ret = rusage_err;
10103                      }
10104                  }
10105              }
10106          }
10107          return ret;
10108  #endif
10109  #ifdef TARGET_NR_swapoff
10110      case TARGET_NR_swapoff:
10111          if (!(p = lock_user_string(arg1)))
10112              return -TARGET_EFAULT;
10113          ret = get_errno(swapoff(p));
10114          unlock_user(p, arg1, 0);
10115          return ret;
10116  #endif
10117      case TARGET_NR_sysinfo:
10118          {
10119              struct target_sysinfo *target_value;
10120              struct sysinfo value;
10121              ret = get_errno(sysinfo(&value));
10122              if (!is_error(ret) && arg1)
10123              {
10124                  if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10125                      return -TARGET_EFAULT;
10126                  __put_user(value.uptime, &target_value->uptime);
10127                  __put_user(value.loads[0], &target_value->loads[0]);
10128                  __put_user(value.loads[1], &target_value->loads[1]);
10129                  __put_user(value.loads[2], &target_value->loads[2]);
10130                  __put_user(value.totalram, &target_value->totalram);
10131                  __put_user(value.freeram, &target_value->freeram);
10132                  __put_user(value.sharedram, &target_value->sharedram);
10133                  __put_user(value.bufferram, &target_value->bufferram);
10134                  __put_user(value.totalswap, &target_value->totalswap);
10135                  __put_user(value.freeswap, &target_value->freeswap);
10136                  __put_user(value.procs, &target_value->procs);
10137                  __put_user(value.totalhigh, &target_value->totalhigh);
10138                  __put_user(value.freehigh, &target_value->freehigh);
10139                  __put_user(value.mem_unit, &target_value->mem_unit);
10140                  unlock_user_struct(target_value, arg1, 1);
10141              }
10142          }
10143          return ret;
10144  #ifdef TARGET_NR_ipc
10145      case TARGET_NR_ipc:
10146          return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10147  #endif
10148  #ifdef TARGET_NR_semget
10149      case TARGET_NR_semget:
10150          return get_errno(semget(arg1, arg2, arg3));
10151  #endif
10152  #ifdef TARGET_NR_semop
10153      case TARGET_NR_semop:
10154          return do_semtimedop(arg1, arg2, arg3, 0, false);
10155  #endif
10156  #ifdef TARGET_NR_semtimedop
10157      case TARGET_NR_semtimedop:
10158          return do_semtimedop(arg1, arg2, arg3, arg4, false);
10159  #endif
10160  #ifdef TARGET_NR_semtimedop_time64
10161      case TARGET_NR_semtimedop_time64:
10162          return do_semtimedop(arg1, arg2, arg3, arg4, true);
10163  #endif
10164  #ifdef TARGET_NR_semctl
10165      case TARGET_NR_semctl:
10166          return do_semctl(arg1, arg2, arg3, arg4);
10167  #endif
10168  #ifdef TARGET_NR_msgctl
10169      case TARGET_NR_msgctl:
10170          return do_msgctl(arg1, arg2, arg3);
10171  #endif
10172  #ifdef TARGET_NR_msgget
10173      case TARGET_NR_msgget:
10174          return get_errno(msgget(arg1, arg2));
10175  #endif
10176  #ifdef TARGET_NR_msgrcv
10177      case TARGET_NR_msgrcv:
10178          return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10179  #endif
10180  #ifdef TARGET_NR_msgsnd
10181      case TARGET_NR_msgsnd:
10182          return do_msgsnd(arg1, arg2, arg3, arg4);
10183  #endif
10184  #ifdef TARGET_NR_shmget
10185      case TARGET_NR_shmget:
10186          return get_errno(shmget(arg1, arg2, arg3));
10187  #endif
10188  #ifdef TARGET_NR_shmctl
10189      case TARGET_NR_shmctl:
10190          return do_shmctl(arg1, arg2, arg3);
10191  #endif
10192  #ifdef TARGET_NR_shmat
10193      case TARGET_NR_shmat:
10194          return do_shmat(cpu_env, arg1, arg2, arg3);
10195  #endif
10196  #ifdef TARGET_NR_shmdt
10197      case TARGET_NR_shmdt:
10198          return do_shmdt(arg1);
10199  #endif
10200      case TARGET_NR_fsync:
10201          return get_errno(fsync(arg1));
10202      case TARGET_NR_clone:
10203          /* Linux manages to have three different orderings for its
10204           * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10205           * match the kernel's CONFIG_CLONE_* settings.
10206           * Microblaze is further special in that it uses a sixth
10207           * implicit argument to clone for the TLS pointer.
10208           */
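        /*
         * Assuming do_fork() takes (env, flags, newsp, parent_tidptr, tls,
         * child_tidptr), the guest argument orders handled below work out to:
         *   default:           flags, newsp, parent_tidptr, child_tidptr, tls
         *   CLONE_BACKWARDS:   flags, newsp, parent_tidptr, tls, child_tidptr
         *   CLONE_BACKWARDS2:  newsp, flags, parent_tidptr, child_tidptr, tls
         *   Microblaze:        flags, newsp, <unused>, parent_tidptr,
         *                      child_tidptr, tls
         */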
10209  #if defined(TARGET_MICROBLAZE)
10210          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10211  #elif defined(TARGET_CLONE_BACKWARDS)
10212          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10213  #elif defined(TARGET_CLONE_BACKWARDS2)
10214          ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10215  #else
10216          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10217  #endif
10218          return ret;
10219  #ifdef __NR_exit_group
10220          /* new thread calls */
10221      case TARGET_NR_exit_group:
10222          preexit_cleanup(cpu_env, arg1);
10223          return get_errno(exit_group(arg1));
10224  #endif
10225      case TARGET_NR_setdomainname:
10226          if (!(p = lock_user_string(arg1)))
10227              return -TARGET_EFAULT;
10228          ret = get_errno(setdomainname(p, arg2));
10229          unlock_user(p, arg1, 0);
10230          return ret;
10231      case TARGET_NR_uname:
10232          /* no need to transcode because we use the linux syscall */
10233          {
10234              struct new_utsname *buf;
10235  
10236              if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10237                  return -TARGET_EFAULT;
10238              ret = get_errno(sys_uname(buf));
10239              if (!is_error(ret)) {
10240                  /* Overwrite the native machine name with whatever is being
10241                     emulated. */
10242                  g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10243                            sizeof(buf->machine));
10244                  /* Allow the user to override the reported release.  */
10245                  if (qemu_uname_release && *qemu_uname_release) {
10246                      g_strlcpy(buf->release, qemu_uname_release,
10247                                sizeof(buf->release));
10248                  }
10249              }
10250              unlock_user_struct(buf, arg1, 1);
10251          }
10252          return ret;
10253  #ifdef TARGET_I386
10254      case TARGET_NR_modify_ldt:
10255          return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10256  #if !defined(TARGET_X86_64)
10257      case TARGET_NR_vm86:
10258          return do_vm86(cpu_env, arg1, arg2);
10259  #endif
10260  #endif
10261  #if defined(TARGET_NR_adjtimex)
10262      case TARGET_NR_adjtimex:
10263          {
10264              struct timex host_buf;
10265  
10266              if (target_to_host_timex(&host_buf, arg1) != 0) {
10267                  return -TARGET_EFAULT;
10268              }
10269              ret = get_errno(adjtimex(&host_buf));
10270              if (!is_error(ret)) {
10271                  if (host_to_target_timex(arg1, &host_buf) != 0) {
10272                      return -TARGET_EFAULT;
10273                  }
10274              }
10275          }
10276          return ret;
10277  #endif
10278  #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10279      case TARGET_NR_clock_adjtime:
10280          {
10281              struct timex htx, *phtx = &htx;
10282  
10283              if (target_to_host_timex(phtx, arg2) != 0) {
10284                  return -TARGET_EFAULT;
10285              }
10286              ret = get_errno(clock_adjtime(arg1, phtx));
10287              if (!is_error(ret) && phtx) {
10288                  if (host_to_target_timex(arg2, phtx) != 0) {
10289                      return -TARGET_EFAULT;
10290                  }
10291              }
10292          }
10293          return ret;
10294  #endif
10295  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10296      case TARGET_NR_clock_adjtime64:
10297          {
10298              struct timex htx;
10299  
10300              if (target_to_host_timex64(&htx, arg2) != 0) {
10301                  return -TARGET_EFAULT;
10302              }
10303              ret = get_errno(clock_adjtime(arg1, &htx));
10304              if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10305                  return -TARGET_EFAULT;
10306              }
10307          }
10308          return ret;
10309  #endif
10310      case TARGET_NR_getpgid:
10311          return get_errno(getpgid(arg1));
10312      case TARGET_NR_fchdir:
10313          return get_errno(fchdir(arg1));
10314      case TARGET_NR_personality:
10315          return get_errno(personality(arg1));
10316  #ifdef TARGET_NR__llseek /* Not on alpha */
10317      case TARGET_NR__llseek:
10318          {
10319              int64_t res;
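            /*
             * Hosts that lack __NR_llseek (64-bit hosts) can perform the
             * seek directly with lseek() on the combined 64-bit offset;
             * otherwise forward to the host _llseek.  In both cases the
             * 64-bit result is written back to the guest through arg4.
             */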
10320  #if !defined(__NR_llseek)
10321              res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10322              if (res == -1) {
10323                  ret = get_errno(res);
10324              } else {
10325                  ret = 0;
10326              }
10327  #else
10328              ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10329  #endif
10330              if ((ret == 0) && put_user_s64(res, arg4)) {
10331                  return -TARGET_EFAULT;
10332              }
10333          }
10334          return ret;
10335  #endif
10336  #ifdef TARGET_NR_getdents
10337      case TARGET_NR_getdents:
10338  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10339  #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10340          {
10341              struct target_dirent *target_dirp;
10342              struct linux_dirent *dirp;
10343              abi_long count = arg3;
10344  
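            /*
             * On a 64-bit host the host struct linux_dirent does not match
             * the 32-bit target layout, so read into a temporary host buffer
             * and repack each record into the guest buffer one at a time.
             */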
10345              dirp = g_try_malloc(count);
10346              if (!dirp) {
10347                  return -TARGET_ENOMEM;
10348              }
10349  
10350              ret = get_errno(sys_getdents(arg1, dirp, count));
10351              if (!is_error(ret)) {
10352                  struct linux_dirent *de;
10353                  struct target_dirent *tde;
10354                  int len = ret;
10355                  int reclen, treclen;
10356                  int count1, tnamelen;
10357  
10358                  count1 = 0;
10359                  de = dirp;
10360                  if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
10361                      return -TARGET_EFAULT;
                       }
10362                  tde = target_dirp;
10363                  while (len > 0) {
10364                      reclen = de->d_reclen;
10365                      tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10366                      assert(tnamelen >= 0);
10367                      treclen = tnamelen + offsetof(struct target_dirent, d_name);
10368                      assert(count1 + treclen <= count);
10369                      tde->d_reclen = tswap16(treclen);
10370                      tde->d_ino = tswapal(de->d_ino);
10371                      tde->d_off = tswapal(de->d_off);
10372                      memcpy(tde->d_name, de->d_name, tnamelen);
10373                      de = (struct linux_dirent *)((char *)de + reclen);
10374                      len -= reclen;
10375                      tde = (struct target_dirent *)((char *)tde + treclen);
10376                      count1 += treclen;
10377                  }
10378                  ret = count1;
10379                  unlock_user(target_dirp, arg2, ret);
10380              }
10381              g_free(dirp);
10382          }
10383  #else
10384          {
10385              struct linux_dirent *dirp;
10386              abi_long count = arg3;
10387  
10388              if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10389                  return -TARGET_EFAULT;
10390              ret = get_errno(sys_getdents(arg1, dirp, count));
10391              if (!is_error(ret)) {
10392                  struct linux_dirent *de;
10393                  int len = ret;
10394                  int reclen;
10395                  de = dirp;
10396                  while (len > 0) {
10397                      reclen = de->d_reclen;
10398                      if (reclen > len)
10399                          break;
10400                      de->d_reclen = tswap16(reclen);
10401                      tswapls(&de->d_ino);
10402                      tswapls(&de->d_off);
10403                      de = (struct linux_dirent *)((char *)de + reclen);
10404                      len -= reclen;
10405                  }
10406              }
10407              unlock_user(dirp, arg2, ret);
10408          }
10409  #endif
10410  #else
10411          /* Implement getdents in terms of getdents64 */
10412          {
10413              struct linux_dirent64 *dirp;
10414              abi_long count = arg3;
10415  
10416              dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10417              if (!dirp) {
10418                  return -TARGET_EFAULT;
10419              }
10420              ret = get_errno(sys_getdents64(arg1, dirp, count));
10421              if (!is_error(ret)) {
10422                  /* Convert the dirent64 structs to target dirent.  We do this
10423                   * in-place, since we can guarantee that a target_dirent is no
10424                   * larger than a dirent64; however this means we have to be
10425                   * careful to read everything before writing in the new format.
10426                   */
10427                  struct linux_dirent64 *de;
10428                  struct target_dirent *tde;
10429                  int len = ret;
10430                  int tlen = 0;
10431  
10432                  de = dirp;
10433                  tde = (struct target_dirent *)dirp;
10434                  while (len > 0) {
10435                      int namelen, treclen;
10436                      int reclen = de->d_reclen;
10437                      uint64_t ino = de->d_ino;
10438                      int64_t off = de->d_off;
10439                      uint8_t type = de->d_type;
10440  
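                           /*
                            * Target record length: header + name + NUL plus
                            * one byte for d_type (stored after the name, see
                            * below), rounded up to abi_long alignment.
                            */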
10441                      namelen = strlen(de->d_name);
10442                      treclen = offsetof(struct target_dirent, d_name)
10443                          + namelen + 2;
10444                      treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10445  
10446                      memmove(tde->d_name, de->d_name, namelen + 1);
10447                      tde->d_ino = tswapal(ino);
10448                      tde->d_off = tswapal(off);
10449                      tde->d_reclen = tswap16(treclen);
10450                      /* The target_dirent type is in what was formerly a padding
10451                       * byte at the end of the structure:
10452                       */
10453                      *(((char *)tde) + treclen - 1) = type;
10454  
10455                      de = (struct linux_dirent64 *)((char *)de + reclen);
10456                      tde = (struct target_dirent *)((char *)tde + treclen);
10457                      len -= reclen;
10458                      tlen += treclen;
10459                  }
10460                  ret = tlen;
10461              }
10462              unlock_user(dirp, arg2, ret);
10463          }
10464  #endif
10465          return ret;
10466  #endif /* TARGET_NR_getdents */
10467  #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10468      case TARGET_NR_getdents64:
10469          {
10470              struct linux_dirent64 *dirp;
10471              abi_long count = arg3;
10472              if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10473                  return -TARGET_EFAULT;
10474              ret = get_errno(sys_getdents64(arg1, dirp, count));
10475              if (!is_error(ret)) {
10476                  struct linux_dirent64 *de;
10477                  int len = ret;
10478                  int reclen;
10479                  de = dirp;
10480                  while (len > 0) {
10481                      reclen = de->d_reclen;
10482                      if (reclen > len)
10483                          break;
10484                      de->d_reclen = tswap16(reclen);
10485                      tswap64s((uint64_t *)&de->d_ino);
10486                      tswap64s((uint64_t *)&de->d_off);
10487                      de = (struct linux_dirent64 *)((char *)de + reclen);
10488                      len -= reclen;
10489                  }
10490              }
10491              unlock_user(dirp, arg2, ret);
10492          }
10493          return ret;
10494  #endif /* TARGET_NR_getdents64 */
10495  #if defined(TARGET_NR__newselect)
10496      case TARGET_NR__newselect:
10497          return do_select(arg1, arg2, arg3, arg4, arg5);
10498  #endif
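           /*
            * poll, ppoll and ppoll_time64 all go through do_ppoll(); the
            * trailing booleans select ppoll semantics (sigmask plus timespec
            * timeout) and the 64-bit time_t layout respectively.
            */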
10499  #ifdef TARGET_NR_poll
10500      case TARGET_NR_poll:
10501          return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10502  #endif
10503  #ifdef TARGET_NR_ppoll
10504      case TARGET_NR_ppoll:
10505          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10506  #endif
10507  #ifdef TARGET_NR_ppoll_time64
10508      case TARGET_NR_ppoll_time64:
10509          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10510  #endif
10511      case TARGET_NR_flock:
10512          /* NOTE: the flock constant seems to be the same for every
10513             Linux platform */
10514          return get_errno(safe_flock(arg1, arg2));
10515      case TARGET_NR_readv:
10516          {
10517              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10518              if (vec != NULL) {
10519                  ret = get_errno(safe_readv(arg1, vec, arg3));
10520                  unlock_iovec(vec, arg2, arg3, 1);
10521              } else {
10522                  ret = -host_to_target_errno(errno);
10523              }
10524          }
10525          return ret;
10526      case TARGET_NR_writev:
10527          {
10528              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10529              if (vec != NULL) {
10530                  ret = get_errno(safe_writev(arg1, vec, arg3));
10531                  unlock_iovec(vec, arg2, arg3, 0);
10532              } else {
10533                  ret = -host_to_target_errno(errno);
10534              }
10535          }
10536          return ret;
10537  #if defined(TARGET_NR_preadv)
10538      case TARGET_NR_preadv:
10539          {
10540              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10541              if (vec != NULL) {
10542                  unsigned long low, high;
10543  
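                       /*
                        * preadv takes the 64-bit file offset as separate
                        * low/high words; target_to_host_low_high() assembles
                        * them from the target's argument layout.
                        */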
10544                  target_to_host_low_high(arg4, arg5, &low, &high);
10545                  ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10546                  unlock_iovec(vec, arg2, arg3, 1);
10547              } else {
10548                  ret = -host_to_target_errno(errno);
10549             }
10550          }
10551          return ret;
10552  #endif
10553  #if defined(TARGET_NR_pwritev)
10554      case TARGET_NR_pwritev:
10555          {
10556              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10557              if (vec != NULL) {
10558                  unsigned long low, high;
10559  
10560                  target_to_host_low_high(arg4, arg5, &low, &high);
10561                  ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10562                  unlock_iovec(vec, arg2, arg3, 0);
10563              } else {
10564                  ret = -host_to_target_errno(errno);
10565             }
10566          }
10567          return ret;
10568  #endif
10569      case TARGET_NR_getsid:
10570          return get_errno(getsid(arg1));
10571  #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10572      case TARGET_NR_fdatasync:
10573          return get_errno(fdatasync(arg1));
10574  #endif
10575      case TARGET_NR_sched_getaffinity:
10576          {
10577              unsigned int mask_size;
10578              unsigned long *mask;
10579  
10580              /*
10581               * sched_getaffinity needs multiples of ulong, so need to take
10582               * care of mismatches between target ulong and host ulong sizes.
10583               */
10584              if (arg2 & (sizeof(abi_ulong) - 1)) {
10585                  return -TARGET_EINVAL;
10586              }
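               /* Round the request up to a whole number of host longs. */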
10587              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10588  
10589              mask = alloca(mask_size);
10590              memset(mask, 0, mask_size);
10591              ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10592  
10593              if (!is_error(ret)) {
10594                  if (ret > arg2) {
10595                      /* More data returned than the caller's buffer will fit.
10596                       * This only happens if sizeof(abi_long) < sizeof(long)
10597                       * and the caller passed us a buffer holding an odd number
10598                       * of abi_longs. If the host kernel is actually using the
10599                       * extra 4 bytes then fail EINVAL; otherwise we can just
10600                       * ignore them and only copy the interesting part.
10601                       */
10602                      int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10603                      if (numcpus > arg2 * 8) {
10604                          return -TARGET_EINVAL;
10605                      }
10606                      ret = arg2;
10607                  }
10608  
10609                  if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10610                      return -TARGET_EFAULT;
10611                  }
10612              }
10613          }
10614          return ret;
10615      case TARGET_NR_sched_setaffinity:
10616          {
10617              unsigned int mask_size;
10618              unsigned long *mask;
10619  
10620              /*
10621               * sched_setaffinity needs multiples of ulong, so need to take
10622               * care of mismatches between target ulong and host ulong sizes.
10623               */
10624              if (arg2 & (sizeof(abi_ulong) - 1)) {
10625                  return -TARGET_EINVAL;
10626              }
10627              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10628              mask = alloca(mask_size);
10629  
10630              ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10631              if (ret) {
10632                  return ret;
10633              }
10634  
10635              return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10636          }
10637      case TARGET_NR_getcpu:
10638          {
10639              unsigned cpu, node;
10640              ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10641                                         arg2 ? &node : NULL,
10642                                         NULL));
10643              if (is_error(ret)) {
10644                  return ret;
10645              }
10646              if (arg1 && put_user_u32(cpu, arg1)) {
10647                  return -TARGET_EFAULT;
10648              }
10649              if (arg2 && put_user_u32(node, arg2)) {
10650                  return -TARGET_EFAULT;
10651              }
10652          }
10653          return ret;
10654      case TARGET_NR_sched_setparam:
10655          {
10656              struct sched_param *target_schp;
10657              struct sched_param schp;
10658  
10659              if (arg2 == 0) {
10660                  return -TARGET_EINVAL;
10661              }
10662              if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10663                  return -TARGET_EFAULT;
10664              schp.sched_priority = tswap32(target_schp->sched_priority);
10665              unlock_user_struct(target_schp, arg2, 0);
10666              return get_errno(sched_setparam(arg1, &schp));
10667          }
10668      case TARGET_NR_sched_getparam:
10669          {
10670              struct sched_param *target_schp;
10671              struct sched_param schp;
10672  
10673              if (arg2 == 0) {
10674                  return -TARGET_EINVAL;
10675              }
10676              ret = get_errno(sched_getparam(arg1, &schp));
10677              if (!is_error(ret)) {
10678                  if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10679                      return -TARGET_EFAULT;
10680                  target_schp->sched_priority = tswap32(schp.sched_priority);
10681                  unlock_user_struct(target_schp, arg2, 1);
10682              }
10683          }
10684          return ret;
10685      case TARGET_NR_sched_setscheduler:
10686          {
10687              struct sched_param *target_schp;
10688              struct sched_param schp;
10689              if (arg3 == 0) {
10690                  return -TARGET_EINVAL;
10691              }
10692              if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10693                  return -TARGET_EFAULT;
10694              schp.sched_priority = tswap32(target_schp->sched_priority);
10695              unlock_user_struct(target_schp, arg3, 0);
10696              return get_errno(sched_setscheduler(arg1, arg2, &schp));
10697          }
10698      case TARGET_NR_sched_getscheduler:
10699          return get_errno(sched_getscheduler(arg1));
10700      case TARGET_NR_sched_yield:
10701          return get_errno(sched_yield());
10702      case TARGET_NR_sched_get_priority_max:
10703          return get_errno(sched_get_priority_max(arg1));
10704      case TARGET_NR_sched_get_priority_min:
10705          return get_errno(sched_get_priority_min(arg1));
10706  #ifdef TARGET_NR_sched_rr_get_interval
10707      case TARGET_NR_sched_rr_get_interval:
10708          {
10709              struct timespec ts;
10710              ret = get_errno(sched_rr_get_interval(arg1, &ts));
10711              if (!is_error(ret)) {
10712                  ret = host_to_target_timespec(arg2, &ts);
10713              }
10714          }
10715          return ret;
10716  #endif
10717  #ifdef TARGET_NR_sched_rr_get_interval_time64
10718      case TARGET_NR_sched_rr_get_interval_time64:
10719          {
10720              struct timespec ts;
10721              ret = get_errno(sched_rr_get_interval(arg1, &ts));
10722              if (!is_error(ret)) {
10723                  ret = host_to_target_timespec64(arg2, &ts);
10724              }
10725          }
10726          return ret;
10727  #endif
10728  #if defined(TARGET_NR_nanosleep)
10729      case TARGET_NR_nanosleep:
10730          {
10731              struct timespec req, rem;
10732              if (target_to_host_timespec(&req, arg1)) {
                       return -TARGET_EFAULT;
                   }
10733              ret = get_errno(safe_nanosleep(&req, &rem));
10734              if (is_error(ret) && arg2) {
10735                  if (host_to_target_timespec(arg2, &rem)) {
                           return -TARGET_EFAULT;
                       }
10736              }
10737          }
10738          return ret;
10739  #endif
10740      case TARGET_NR_prctl:
10741          switch (arg1) {
10742          case PR_GET_PDEATHSIG:
10743          {
10744              int deathsig;
10745              ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10746              if (!is_error(ret) && arg2
10747                  && put_user_s32(deathsig, arg2)) {
10748                  return -TARGET_EFAULT;
10749              }
10750              return ret;
10751          }
10752  #ifdef PR_GET_NAME
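           /*
            * The kernel thread name (comm) is at most 16 bytes including the
            * trailing NUL, hence the fixed-size buffers below.
            */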
10753          case PR_GET_NAME:
10754          {
10755              void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10756              if (!name) {
10757                  return -TARGET_EFAULT;
10758              }
10759              ret = get_errno(prctl(arg1, (unsigned long)name,
10760                                    arg3, arg4, arg5));
10761              unlock_user(name, arg2, 16);
10762              return ret;
10763          }
10764          case PR_SET_NAME:
10765          {
10766              void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10767              if (!name) {
10768                  return -TARGET_EFAULT;
10769              }
10770              ret = get_errno(prctl(arg1, (unsigned long)name,
10771                                    arg3, arg4, arg5));
10772              unlock_user(name, arg2, 0);
10773              return ret;
10774          }
10775  #endif
10776  #ifdef TARGET_MIPS
10777          case TARGET_PR_GET_FP_MODE:
10778          {
10779              CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10780              ret = 0;
10781              if (env->CP0_Status & (1 << CP0St_FR)) {
10782                  ret |= TARGET_PR_FP_MODE_FR;
10783              }
10784              if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10785                  ret |= TARGET_PR_FP_MODE_FRE;
10786              }
10787              return ret;
10788          }
10789          case TARGET_PR_SET_FP_MODE:
10790          {
10791              CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10792              bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10793              bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10794              bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10795              bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10796  
10797              const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10798                                              TARGET_PR_FP_MODE_FRE;
10799  
10800              /* If nothing to change, return right away, successfully.  */
10801              if (old_fr == new_fr && old_fre == new_fre) {
10802                  return 0;
10803              }
10804              /* Check the value is valid */
10805              if (arg2 & ~known_bits) {
10806                  return -TARGET_EOPNOTSUPP;
10807              }
10808              /* Setting FRE without FR is not supported.  */
10809              if (new_fre && !new_fr) {
10810                  return -TARGET_EOPNOTSUPP;
10811              }
10812              if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10813                  /* FR1 is not supported */
10814                  return -TARGET_EOPNOTSUPP;
10815              }
10816              if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10817                  && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10818                  /* cannot set FR=0 */
10819                  return -TARGET_EOPNOTSUPP;
10820              }
10821              if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10822                  /* Cannot set FRE=1 */
10823                  return -TARGET_EOPNOTSUPP;
10824              }
10825  
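               /*
                * Repack the FPU register file for the new mode: with FR=0 an
                * odd-numbered single sits in its own register, with FR=1 it
                * is the upper half of the even-numbered double, so move the
                * 32-bit halves accordingly.
                */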
10826              int i;
10827              fpr_t *fpr = env->active_fpu.fpr;
10828              for (i = 0; i < 32 ; i += 2) {
10829                  if (!old_fr && new_fr) {
10830                      fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10831                  } else if (old_fr && !new_fr) {
10832                      fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10833                  }
10834              }
10835  
10836              if (new_fr) {
10837                  env->CP0_Status |= (1 << CP0St_FR);
10838                  env->hflags |= MIPS_HFLAG_F64;
10839              } else {
10840                  env->CP0_Status &= ~(1 << CP0St_FR);
10841                  env->hflags &= ~MIPS_HFLAG_F64;
10842              }
10843              if (new_fre) {
10844                  env->CP0_Config5 |= (1 << CP0C5_FRE);
10845                  if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10846                      env->hflags |= MIPS_HFLAG_FRE;
10847                  }
10848              } else {
10849                  env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10850                  env->hflags &= ~MIPS_HFLAG_FRE;
10851              }
10852  
10853              return 0;
10854          }
10855  #endif /* MIPS */
10856  #ifdef TARGET_AARCH64
10857          case TARGET_PR_SVE_SET_VL:
10858              /*
10859               * We cannot support either PR_SVE_SET_VL_ONEXEC or
10860               * PR_SVE_VL_INHERIT.  Note the kernel definition
10861               * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10862               * even though the current architectural maximum is VQ=16.
10863               */
10864              ret = -TARGET_EINVAL;
10865              if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10866                  && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10867                  CPUARMState *env = cpu_env;
10868                  ARMCPU *cpu = env_archcpu(env);
10869                  uint32_t vq, old_vq;
10870  
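                       /*
                        * ZCR_EL1.LEN holds (VQ - 1); the vector length in
                        * bytes is VQ * 16.
                        */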
10871                  old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10872                  vq = MAX(arg2 / 16, 1);
10873                  vq = MIN(vq, cpu->sve_max_vq);
10874  
10875                  if (vq < old_vq) {
10876                      aarch64_sve_narrow_vq(env, vq);
10877                  }
10878                  env->vfp.zcr_el[1] = vq - 1;
10879                  arm_rebuild_hflags(env);
10880                  ret = vq * 16;
10881              }
10882              return ret;
10883          case TARGET_PR_SVE_GET_VL:
10884              ret = -TARGET_EINVAL;
10885              {
10886                  ARMCPU *cpu = env_archcpu(cpu_env);
10887                  if (cpu_isar_feature(aa64_sve, cpu)) {
10888                      ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10889                  }
10890              }
10891              return ret;
10892          case TARGET_PR_PAC_RESET_KEYS:
10893              {
10894                  CPUARMState *env = cpu_env;
10895                  ARMCPU *cpu = env_archcpu(env);
10896  
10897                  if (arg3 || arg4 || arg5) {
10898                      return -TARGET_EINVAL;
10899                  }
10900                  if (cpu_isar_feature(aa64_pauth, cpu)) {
10901                      int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10902                                 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10903                                 TARGET_PR_PAC_APGAKEY);
10904                      int ret = 0;
10905                      Error *err = NULL;
10906  
10907                      if (arg2 == 0) {
10908                          arg2 = all;
10909                      } else if (arg2 & ~all) {
10910                          return -TARGET_EINVAL;
10911                      }
10912                      if (arg2 & TARGET_PR_PAC_APIAKEY) {
10913                          ret |= qemu_guest_getrandom(&env->keys.apia,
10914                                                      sizeof(ARMPACKey), &err);
10915                      }
10916                      if (arg2 & TARGET_PR_PAC_APIBKEY) {
10917                          ret |= qemu_guest_getrandom(&env->keys.apib,
10918                                                      sizeof(ARMPACKey), &err);
10919                      }
10920                      if (arg2 & TARGET_PR_PAC_APDAKEY) {
10921                          ret |= qemu_guest_getrandom(&env->keys.apda,
10922                                                      sizeof(ARMPACKey), &err);
10923                      }
10924                      if (arg2 & TARGET_PR_PAC_APDBKEY) {
10925                          ret |= qemu_guest_getrandom(&env->keys.apdb,
10926                                                      sizeof(ARMPACKey), &err);
10927                      }
10928                      if (arg2 & TARGET_PR_PAC_APGAKEY) {
10929                          ret |= qemu_guest_getrandom(&env->keys.apga,
10930                                                      sizeof(ARMPACKey), &err);
10931                      }
10932                      if (ret != 0) {
10933                          /*
10934                           * Some unknown failure in the crypto.  The best
10935                           * we can do is log it and fail the syscall.
10936                           * The real syscall cannot fail this way.
10937                           */
10938                          qemu_log_mask(LOG_UNIMP,
10939                                        "PR_PAC_RESET_KEYS: Crypto failure: %s",
10940                                        error_get_pretty(err));
10941                          error_free(err);
10942                          return -TARGET_EIO;
10943                      }
10944                      return 0;
10945                  }
10946              }
10947              return -TARGET_EINVAL;
10948          case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10949              {
10950                  abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10951                  CPUARMState *env = cpu_env;
10952                  ARMCPU *cpu = env_archcpu(env);
10953  
10954                  if (cpu_isar_feature(aa64_mte, cpu)) {
10955                      valid_mask |= TARGET_PR_MTE_TCF_MASK;
10956                      valid_mask |= TARGET_PR_MTE_TAG_MASK;
10957                  }
10958  
10959                  if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10960                      return -TARGET_EINVAL;
10961                  }
10962                  env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10963  
10964                  if (cpu_isar_feature(aa64_mte, cpu)) {
10965                      switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10966                      case TARGET_PR_MTE_TCF_NONE:
10967                      case TARGET_PR_MTE_TCF_SYNC:
10968                      case TARGET_PR_MTE_TCF_ASYNC:
10969                          break;
10970                      default:
10971                          return -TARGET_EINVAL;
10972                      }
10973  
10974                      /*
10975                       * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10976                       * Note that the syscall values are consistent with hw.
10977                       */
10978                      env->cp15.sctlr_el[1] =
10979                          deposit64(env->cp15.sctlr_el[1], 38, 2,
10980                                    arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10981  
10982                      /*
10983                       * Write PR_MTE_TAG to GCR_EL1[Exclude].
10984                       * Note that the syscall uses an include mask,
10985                       * and hardware uses an exclude mask -- invert.
10986                       */
10987                      env->cp15.gcr_el1 =
10988                          deposit64(env->cp15.gcr_el1, 0, 16,
10989                                    ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10990                      arm_rebuild_hflags(env);
10991                  }
10992                  return 0;
10993              }
10994          case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10995              {
10996                  abi_long ret = 0;
10997                  CPUARMState *env = cpu_env;
10998                  ARMCPU *cpu = env_archcpu(env);
10999  
11000                  if (arg2 || arg3 || arg4 || arg5) {
11001                      return -TARGET_EINVAL;
11002                  }
11003                  if (env->tagged_addr_enable) {
11004                      ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
11005                  }
11006                  if (cpu_isar_feature(aa64_mte, cpu)) {
11007                      /* See above. */
11008                      ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
11009                              << TARGET_PR_MTE_TCF_SHIFT);
11010                      ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
11011                                      ~env->cp15.gcr_el1);
11012                  }
11013                  return ret;
11014              }
11015  #endif /* AARCH64 */
11016          case PR_GET_SECCOMP:
11017          case PR_SET_SECCOMP:
11018              /* Disable seccomp to prevent the target disabling syscalls we
11019               * need. */
11020              return -TARGET_EINVAL;
11021          default:
11022              /* Most prctl options have no pointer arguments */
11023              return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11024          }
11025          break;
11026  #ifdef TARGET_NR_arch_prctl
11027      case TARGET_NR_arch_prctl:
11028          return do_arch_prctl(cpu_env, arg1, arg2);
11029  #endif
11030  #ifdef TARGET_NR_pread64
11031      case TARGET_NR_pread64:
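           /*
            * Some 32-bit ABIs pass 64-bit syscall arguments in aligned
            * register pairs, which inserts a padding argument; shift things
            * down so arg4/arg5 hold the two halves of the offset.
            */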
11032          if (regpairs_aligned(cpu_env, num)) {
11033              arg4 = arg5;
11034              arg5 = arg6;
11035          }
11036          if (arg2 == 0 && arg3 == 0) {
11037              /* Special-case NULL buffer and zero length, which should succeed */
11038              p = 0;
11039          } else {
11040              p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11041              if (!p) {
11042                  return -TARGET_EFAULT;
11043              }
11044          }
11045          ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11046          unlock_user(p, arg2, ret);
11047          return ret;
11048      case TARGET_NR_pwrite64:
11049          if (regpairs_aligned(cpu_env, num)) {
11050              arg4 = arg5;
11051              arg5 = arg6;
11052          }
11053          if (arg2 == 0 && arg3 == 0) {
11054              /* Special-case NULL buffer and zero length, which should succeed */
11055              p = 0;
11056          } else {
11057              p = lock_user(VERIFY_READ, arg2, arg3, 1);
11058              if (!p) {
11059                  return -TARGET_EFAULT;
11060              }
11061          }
11062          ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11063          unlock_user(p, arg2, 0);
11064          return ret;
11065  #endif
11066      case TARGET_NR_getcwd:
11067          if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11068              return -TARGET_EFAULT;
11069          ret = get_errno(sys_getcwd1(p, arg2));
11070          unlock_user(p, arg1, ret);
11071          return ret;
11072      case TARGET_NR_capget:
11073      case TARGET_NR_capset:
11074      {
11075          struct target_user_cap_header *target_header;
11076          struct target_user_cap_data *target_data = NULL;
11077          struct __user_cap_header_struct header;
11078          struct __user_cap_data_struct data[2];
11079          struct __user_cap_data_struct *dataptr = NULL;
11080          int i, target_datalen;
11081          int data_items = 1;
11082  
11083          if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11084              return -TARGET_EFAULT;
11085          }
11086          header.version = tswap32(target_header->version);
11087          header.pid = tswap32(target_header->pid);
11088  
11089          if (header.version != _LINUX_CAPABILITY_VERSION) {
11090              /* Version 2 and up takes pointer to two user_data structs */
11091              data_items = 2;
11092          }
11093  
11094          target_datalen = sizeof(*target_data) * data_items;
11095  
11096          if (arg2) {
11097              if (num == TARGET_NR_capget) {
11098                  target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11099              } else {
11100                  target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11101              }
11102              if (!target_data) {
11103                  unlock_user_struct(target_header, arg1, 0);
11104                  return -TARGET_EFAULT;
11105              }
11106  
11107              if (num == TARGET_NR_capset) {
11108                  for (i = 0; i < data_items; i++) {
11109                      data[i].effective = tswap32(target_data[i].effective);
11110                      data[i].permitted = tswap32(target_data[i].permitted);
11111                      data[i].inheritable = tswap32(target_data[i].inheritable);
11112                  }
11113              }
11114  
11115              dataptr = data;
11116          }
11117  
11118          if (num == TARGET_NR_capget) {
11119              ret = get_errno(capget(&header, dataptr));
11120          } else {
11121              ret = get_errno(capset(&header, dataptr));
11122          }
11123  
11124          /* The kernel always updates version for both capget and capset */
11125          target_header->version = tswap32(header.version);
11126          unlock_user_struct(target_header, arg1, 1);
11127  
11128          if (arg2) {
11129              if (num == TARGET_NR_capget) {
11130                  for (i = 0; i < data_items; i++) {
11131                      target_data[i].effective = tswap32(data[i].effective);
11132                      target_data[i].permitted = tswap32(data[i].permitted);
11133                      target_data[i].inheritable = tswap32(data[i].inheritable);
11134                  }
11135                  unlock_user(target_data, arg2, target_datalen);
11136              } else {
11137                  unlock_user(target_data, arg2, 0);
11138              }
11139          }
11140          return ret;
11141      }
11142      case TARGET_NR_sigaltstack:
11143          return do_sigaltstack(arg1, arg2, cpu_env);
11144  
11145  #ifdef CONFIG_SENDFILE
11146  #ifdef TARGET_NR_sendfile
11147      case TARGET_NR_sendfile:
11148      {
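           /*
            * The offset argument is optional; when present it is copied in,
            * handed to the host sendfile(), and the updated value written
            * back, since the kernel advances it.
            */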
11149          off_t *offp = NULL;
11150          off_t off;
11151          if (arg3) {
11152              ret = get_user_sal(off, arg3);
11153              if (is_error(ret)) {
11154                  return ret;
11155              }
11156              offp = &off;
11157          }
11158          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11159          if (!is_error(ret) && arg3) {
11160              abi_long ret2 = put_user_sal(off, arg3);
11161              if (is_error(ret2)) {
11162                  ret = ret2;
11163              }
11164          }
11165          return ret;
11166      }
11167  #endif
11168  #ifdef TARGET_NR_sendfile64
11169      case TARGET_NR_sendfile64:
11170      {
11171          off_t *offp = NULL;
11172          off_t off;
11173          if (arg3) {
11174              ret = get_user_s64(off, arg3);
11175              if (is_error(ret)) {
11176                  return ret;
11177              }
11178              offp = &off;
11179          }
11180          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11181          if (!is_error(ret) && arg3) {
11182              abi_long ret2 = put_user_s64(off, arg3);
11183              if (is_error(ret2)) {
11184                  ret = ret2;
11185              }
11186          }
11187          return ret;
11188      }
11189  #endif
11190  #endif
11191  #ifdef TARGET_NR_vfork
11192      case TARGET_NR_vfork:
11193          return get_errno(do_fork(cpu_env,
11194                           CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11195                           0, 0, 0, 0));
11196  #endif
11197  #ifdef TARGET_NR_ugetrlimit
11198      case TARGET_NR_ugetrlimit:
11199      {
11200          struct rlimit rlim;
11201          int resource = target_to_host_resource(arg1);
11202          ret = get_errno(getrlimit(resource, &rlim));
11203          if (!is_error(ret)) {
11204              struct target_rlimit *target_rlim;
11205              if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
11206                  return -TARGET_EFAULT;
                   }
11207              target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11208              target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11209              unlock_user_struct(target_rlim, arg2, 1);
11210          }
11211          return ret;
11212      }
11213  #endif
11214  #ifdef TARGET_NR_truncate64
11215      case TARGET_NR_truncate64:
11216          if (!(p = lock_user_string(arg1))) {
11217              return -TARGET_EFAULT;
               }
11218          ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11219          unlock_user(p, arg1, 0);
11220          return ret;
11221  #endif
11222  #ifdef TARGET_NR_ftruncate64
11223      case TARGET_NR_ftruncate64:
11224          return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11225  #endif
11226  #ifdef TARGET_NR_stat64
11227      case TARGET_NR_stat64:
11228          if (!(p = lock_user_string(arg1))) {
11229              return -TARGET_EFAULT;
11230          }
11231          ret = get_errno(stat(path(p), &st));
11232          unlock_user(p, arg1, 0);
11233          if (!is_error(ret))
11234              ret = host_to_target_stat64(cpu_env, arg2, &st);
11235          return ret;
11236  #endif
11237  #ifdef TARGET_NR_lstat64
11238      case TARGET_NR_lstat64:
11239          if (!(p = lock_user_string(arg1))) {
11240              return -TARGET_EFAULT;
11241          }
11242          ret = get_errno(lstat(path(p), &st));
11243          unlock_user(p, arg1, 0);
11244          if (!is_error(ret))
11245              ret = host_to_target_stat64(cpu_env, arg2, &st);
11246          return ret;
11247  #endif
11248  #ifdef TARGET_NR_fstat64
11249      case TARGET_NR_fstat64:
11250          ret = get_errno(fstat(arg1, &st));
11251          if (!is_error(ret))
11252              ret = host_to_target_stat64(cpu_env, arg2, &st);
11253          return ret;
11254  #endif
11255  #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11256  #ifdef TARGET_NR_fstatat64
11257      case TARGET_NR_fstatat64:
11258  #endif
11259  #ifdef TARGET_NR_newfstatat
11260      case TARGET_NR_newfstatat:
11261  #endif
11262          if (!(p = lock_user_string(arg2))) {
11263              return -TARGET_EFAULT;
11264          }
11265          ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11266          unlock_user(p, arg2, 0);
11267          if (!is_error(ret))
11268              ret = host_to_target_stat64(cpu_env, arg3, &st);
11269          return ret;
11270  #endif
11271  #if defined(TARGET_NR_statx)
11272      case TARGET_NR_statx:
11273          {
11274              struct target_statx *target_stx;
11275              int dirfd = arg1;
11276              int flags = arg3;
11277  
11278              p = lock_user_string(arg2);
11279              if (p == NULL) {
11280                  return -TARGET_EFAULT;
11281              }
11282  #if defined(__NR_statx)
11283              {
11284                  /*
11285                   * It is assumed that struct statx is architecture independent.
11286                   */
11287                  struct target_statx host_stx;
11288                  int mask = arg4;
11289  
11290                  ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11291                  if (!is_error(ret)) {
11292                      if (host_to_target_statx(&host_stx, arg5) != 0) {
11293                          unlock_user(p, arg2, 0);
11294                          return -TARGET_EFAULT;
11295                      }
11296                  }
11297  
11298                  if (ret != -TARGET_ENOSYS) {
11299                      unlock_user(p, arg2, 0);
11300                      return ret;
11301                  }
11302              }
11303  #endif
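               /*
                * Either the host has no statx syscall or it returned ENOSYS:
                * fall back to fstatat() and fill in the statx fields that a
                * plain struct stat can provide.
                */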
11304              ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11305              unlock_user(p, arg2, 0);
11306  
11307              if (!is_error(ret)) {
11308                  if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11309                      return -TARGET_EFAULT;
11310                  }
11311                  memset(target_stx, 0, sizeof(*target_stx));
11312                  __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11313                  __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11314                  __put_user(st.st_ino, &target_stx->stx_ino);
11315                  __put_user(st.st_mode, &target_stx->stx_mode);
11316                  __put_user(st.st_uid, &target_stx->stx_uid);
11317                  __put_user(st.st_gid, &target_stx->stx_gid);
11318                  __put_user(st.st_nlink, &target_stx->stx_nlink);
11319                  __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11320                  __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11321                  __put_user(st.st_size, &target_stx->stx_size);
11322                  __put_user(st.st_blksize, &target_stx->stx_blksize);
11323                  __put_user(st.st_blocks, &target_stx->stx_blocks);
11324                  __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11325                  __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11326                  __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11327                  unlock_user_struct(target_stx, arg5, 1);
11328              }
11329          }
11330          return ret;
11331  #endif
11332  #ifdef TARGET_NR_lchown
11333      case TARGET_NR_lchown:
11334          if (!(p = lock_user_string(arg1)))
11335              return -TARGET_EFAULT;
11336          ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11337          unlock_user(p, arg1, 0);
11338          return ret;
11339  #endif
11340  #ifdef TARGET_NR_getuid
11341      case TARGET_NR_getuid:
11342          return get_errno(high2lowuid(getuid()));
11343  #endif
11344  #ifdef TARGET_NR_getgid
11345      case TARGET_NR_getgid:
11346          return get_errno(high2lowgid(getgid()));
11347  #endif
11348  #ifdef TARGET_NR_geteuid
11349      case TARGET_NR_geteuid:
11350          return get_errno(high2lowuid(geteuid()));
11351  #endif
11352  #ifdef TARGET_NR_getegid
11353      case TARGET_NR_getegid:
11354          return get_errno(high2lowgid(getegid()));
11355  #endif
11356      case TARGET_NR_setreuid:
11357          return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11358      case TARGET_NR_setregid:
11359          return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11360      case TARGET_NR_getgroups:
11361          {
11362              int gidsetsize = arg1;
11363              target_id *target_grouplist;
11364              gid_t *grouplist;
11365              int i;
11366  
11367              grouplist = alloca(gidsetsize * sizeof(gid_t));
11368              ret = get_errno(getgroups(gidsetsize, grouplist));
11369              if (gidsetsize == 0)
11370                  return ret;
11371              if (!is_error(ret)) {
11372                  target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11373                  if (!target_grouplist)
11374                      return -TARGET_EFAULT;
11375                  for (i = 0; i < ret; i++) {
11376                      target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                       }
11377                  unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11378              }
11379          }
11380          return ret;
11381      case TARGET_NR_setgroups:
11382          {
11383              int gidsetsize = arg1;
11384              target_id *target_grouplist;
11385              gid_t *grouplist = NULL;
11386              int i;
11387              if (gidsetsize) {
11388                  grouplist = alloca(gidsetsize * sizeof(gid_t));
11389                  target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11390                  if (!target_grouplist) {
11391                      return -TARGET_EFAULT;
11392                  }
11393                  for (i = 0; i < gidsetsize; i++) {
11394                      grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11395                  }
11396                  unlock_user(target_grouplist, arg2, 0);
11397              }
11398              return get_errno(setgroups(gidsetsize, grouplist));
11399          }
11400      case TARGET_NR_fchown:
11401          return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11402  #if defined(TARGET_NR_fchownat)
11403      case TARGET_NR_fchownat:
11404          if (!(p = lock_user_string(arg2)))
11405              return -TARGET_EFAULT;
11406          ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11407                                   low2highgid(arg4), arg5));
11408          unlock_user(p, arg2, 0);
11409          return ret;
11410  #endif
11411  #ifdef TARGET_NR_setresuid
11412      case TARGET_NR_setresuid:
11413          return get_errno(sys_setresuid(low2highuid(arg1),
11414                                         low2highuid(arg2),
11415                                         low2highuid(arg3)));
11416  #endif
11417  #ifdef TARGET_NR_getresuid
11418      case TARGET_NR_getresuid:
11419          {
11420              uid_t ruid, euid, suid;
11421              ret = get_errno(getresuid(&ruid, &euid, &suid));
11422              if (!is_error(ret)) {
11423                  if (put_user_id(high2lowuid(ruid), arg1)
11424                      || put_user_id(high2lowuid(euid), arg2)
11425                      || put_user_id(high2lowuid(suid), arg3))
11426                      return -TARGET_EFAULT;
11427              }
11428          }
11429          return ret;
11430  #endif
11431  #ifdef TARGET_NR_getresgid
11432      case TARGET_NR_setresgid:
11433          return get_errno(sys_setresgid(low2highgid(arg1),
11434                                         low2highgid(arg2),
11435                                         low2highgid(arg3)));
11436  #endif
11437  #ifdef TARGET_NR_getresgid
11438      case TARGET_NR_getresgid:
11439          {
11440              gid_t rgid, egid, sgid;
11441              ret = get_errno(getresgid(&rgid, &egid, &sgid));
11442              if (!is_error(ret)) {
11443                  if (put_user_id(high2lowgid(rgid), arg1)
11444                      || put_user_id(high2lowgid(egid), arg2)
11445                      || put_user_id(high2lowgid(sgid), arg3))
11446                      return -TARGET_EFAULT;
11447              }
11448          }
11449          return ret;
11450  #endif
11451  #ifdef TARGET_NR_chown
11452      case TARGET_NR_chown:
11453          if (!(p = lock_user_string(arg1)))
11454              return -TARGET_EFAULT;
11455          ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11456          unlock_user(p, arg1, 0);
11457          return ret;
11458  #endif
11459      case TARGET_NR_setuid:
11460          return get_errno(sys_setuid(low2highuid(arg1)));
11461      case TARGET_NR_setgid:
11462          return get_errno(sys_setgid(low2highgid(arg1)));
11463      case TARGET_NR_setfsuid:
11464          return get_errno(setfsuid(arg1));
11465      case TARGET_NR_setfsgid:
11466          return get_errno(setfsgid(arg1));
11467  
11468  #ifdef TARGET_NR_lchown32
11469      case TARGET_NR_lchown32:
11470          if (!(p = lock_user_string(arg1)))
11471              return -TARGET_EFAULT;
11472          ret = get_errno(lchown(p, arg2, arg3));
11473          unlock_user(p, arg1, 0);
11474          return ret;
11475  #endif
11476  #ifdef TARGET_NR_getuid32
11477      case TARGET_NR_getuid32:
11478          return get_errno(getuid());
11479  #endif
11480  
11481  #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11482      /* Alpha specific: real uid goes in v0, effective uid in a4. */
11483      case TARGET_NR_getxuid:
11484          {
11485              uid_t euid;
11486              euid = geteuid();
11487              ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11488          }
11489          return get_errno(getuid());
11490  #endif
11491  #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11492      /* Alpha specific: real gid goes in v0, effective gid in a4. */
11493      case TARGET_NR_getxgid:
11494          {
11495              gid_t egid;
11496              egid = getegid();
11497              ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11498          }
11499          return get_errno(getgid());
11500  #endif
11501  #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11502      /* Alpha specific */
11503      case TARGET_NR_osf_getsysinfo:
11504          ret = -TARGET_EOPNOTSUPP;
11505          switch (arg1) {
11506            case TARGET_GSI_IEEE_FP_CONTROL:
11507              {
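                   /*
                    * The software completion control word is kept in
                    * env->swcr, but the live exception status bits are stored
                    * only in the FPCR; merge them before returning the value
                    * to the guest.
                    */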
11508                  uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11509                  uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11510  
11511                  swcr &= ~SWCR_STATUS_MASK;
11512                  swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11513  
11514                  if (put_user_u64(swcr, arg2)) {
11515                      return -TARGET_EFAULT;
                       }
11516                  ret = 0;
11517              }
11518              break;
11519  
11520            /* case GSI_IEEE_STATE_AT_SIGNAL:
11521               -- Not implemented in linux kernel.
11522               case GSI_UACPROC:
11523               -- Retrieves current unaligned access state; not much used.
11524               case GSI_PROC_TYPE:
11525               -- Retrieves implver information; surely not used.
11526               case GSI_GET_HWRPB:
11527               -- Grabs a copy of the HWRPB; surely not used.
11528            */
11529          }
11530          return ret;
11531  #endif
11532  #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11533      /* Alpha specific */
11534      case TARGET_NR_osf_setsysinfo:
11535          ret = -TARGET_EOPNOTSUPP;
11536          switch (arg1) {
11537            case TARGET_SSI_IEEE_FP_CONTROL:
11538              {
11539                  uint64_t swcr, fpcr;
11540  
11541                  if (get_user_u64 (swcr, arg2)) {
11542                      return -TARGET_EFAULT;
11543                  }
11544  
11545                  /*
11546                   * The kernel calls swcr_update_status to update the
11547                   * status bits from the fpcr at every point that it
11548                   * could be queried.  Therefore, we store the status
11549                   * bits only in FPCR.
11550                   */
11551                  ((CPUAlphaState *)cpu_env)->swcr
11552                      = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11553  
11554                  fpcr = cpu_alpha_load_fpcr(cpu_env);
11555                  fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11556                  fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11557                  cpu_alpha_store_fpcr(cpu_env, fpcr);
11558                  ret = 0;
11559              }
11560              break;
11561  
11562            case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11563              {
11564                  uint64_t exc, fpcr, fex;
11565  
11566                  if (get_user_u64(exc, arg2)) {
11567                      return -TARGET_EFAULT;
11568                  }
11569                  exc &= SWCR_STATUS_MASK;
11570                  fpcr = cpu_alpha_load_fpcr(cpu_env);
11571  
11572                  /* Old exceptions are not signaled.  */
11573                  fex = alpha_ieee_fpcr_to_swcr(fpcr);
11574                  fex = exc & ~fex;
11575                  fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11576                  fex &= ((CPUArchState *)cpu_env)->swcr;
11577  
11578                  /* Update the hardware fpcr.  */
11579                  fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11580                  cpu_alpha_store_fpcr(cpu_env, fpcr);
11581  
11582                  if (fex) {
11583                      int si_code = TARGET_FPE_FLTUNK;
11584                      target_siginfo_t info;
11585  
11586                      if (fex & SWCR_TRAP_ENABLE_DNO) {
11587                          si_code = TARGET_FPE_FLTUND;
11588                      }
11589                      if (fex & SWCR_TRAP_ENABLE_INE) {
11590                          si_code = TARGET_FPE_FLTRES;
11591                      }
11592                      if (fex & SWCR_TRAP_ENABLE_UNF) {
11593                          si_code = TARGET_FPE_FLTUND;
11594                      }
11595                      if (fex & SWCR_TRAP_ENABLE_OVF) {
11596                          si_code = TARGET_FPE_FLTOVF;
11597                      }
11598                      if (fex & SWCR_TRAP_ENABLE_DZE) {
11599                          si_code = TARGET_FPE_FLTDIV;
11600                      }
11601                      if (fex & SWCR_TRAP_ENABLE_INV) {
11602                          si_code = TARGET_FPE_FLTINV;
11603                      }
11604  
11605                      info.si_signo = SIGFPE;
11606                      info.si_errno = 0;
11607                      info.si_code = si_code;
11608                      info._sifields._sigfault._addr
11609                          = ((CPUArchState *)cpu_env)->pc;
11610                      queue_signal((CPUArchState *)cpu_env, info.si_signo,
11611                                   QEMU_SI_FAULT, &info);
11612                  }
11613                  ret = 0;
11614              }
11615              break;
11616  
11617            /* case SSI_NVPAIRS:
11618               -- Used with SSIN_UACPROC to enable unaligned accesses.
11619               case SSI_IEEE_STATE_AT_SIGNAL:
11620               case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11621               -- Not implemented in linux kernel
11622            */
11623          }
11624          return ret;
11625  #endif
11626  #ifdef TARGET_NR_osf_sigprocmask
11627      /* Alpha specific.  */
11628      case TARGET_NR_osf_sigprocmask:
11629          {
11630              abi_ulong mask;
11631              int how;
11632              sigset_t set, oldset;
11633  
11634              switch (arg1) {
11635              case TARGET_SIG_BLOCK:
11636                  how = SIG_BLOCK;
11637                  break;
11638              case TARGET_SIG_UNBLOCK:
11639                  how = SIG_UNBLOCK;
11640                  break;
11641              case TARGET_SIG_SETMASK:
11642                  how = SIG_SETMASK;
11643                  break;
11644              default:
11645                  return -TARGET_EINVAL;
11646              }
11647              mask = arg2;
11648              target_to_host_old_sigset(&set, &mask);
11649              ret = do_sigprocmask(how, &set, &oldset);
11650              if (!ret) {
11651                  host_to_target_old_sigset(&mask, &oldset);
11652                  ret = mask;
11653              }
11654          }
11655          return ret;
11656  #endif
11657  
11658  #ifdef TARGET_NR_getgid32
11659      case TARGET_NR_getgid32:
11660          return get_errno(getgid());
11661  #endif
11662  #ifdef TARGET_NR_geteuid32
11663      case TARGET_NR_geteuid32:
11664          return get_errno(geteuid());
11665  #endif
11666  #ifdef TARGET_NR_getegid32
11667      case TARGET_NR_getegid32:
11668          return get_errno(getegid());
11669  #endif
11670  #ifdef TARGET_NR_setreuid32
11671      case TARGET_NR_setreuid32:
11672          return get_errno(setreuid(arg1, arg2));
11673  #endif
11674  #ifdef TARGET_NR_setregid32
11675      case TARGET_NR_setregid32:
11676          return get_errno(setregid(arg1, arg2));
11677  #endif
11678  #ifdef TARGET_NR_getgroups32
11679      case TARGET_NR_getgroups32:
11680          {
11681              int gidsetsize = arg1;
11682              uint32_t *target_grouplist;
11683              gid_t *grouplist;
11684              int i;
11685  
11686              grouplist = alloca(gidsetsize * sizeof(gid_t));
11687              ret = get_errno(getgroups(gidsetsize, grouplist));
11688              if (gidsetsize == 0)
11689                  return ret;
11690              if (!is_error(ret)) {
11691                  target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11692                  if (!target_grouplist) {
11693                      return -TARGET_EFAULT;
11694                  }
11695                  for (i = 0; i < ret; i++)
11696                      target_grouplist[i] = tswap32(grouplist[i]);
11697                  unlock_user(target_grouplist, arg2, gidsetsize * 4);
11698              }
11699          }
11700          return ret;
11701  #endif
11702  #ifdef TARGET_NR_setgroups32
11703      case TARGET_NR_setgroups32:
11704          {
11705              int gidsetsize = arg1;
11706              uint32_t *target_grouplist;
11707              gid_t *grouplist;
11708              int i;
11709  
11710              grouplist = alloca(gidsetsize * sizeof(gid_t));
11711              target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11712              if (!target_grouplist) {
11713                  return -TARGET_EFAULT;
11714              }
11715              for (i = 0; i < gidsetsize; i++)
11716                  grouplist[i] = tswap32(target_grouplist[i]);
11717              unlock_user(target_grouplist, arg2, 0);
11718              return get_errno(setgroups(gidsetsize, grouplist));
11719          }
11720  #endif
11721  #ifdef TARGET_NR_fchown32
11722      case TARGET_NR_fchown32:
11723          return get_errno(fchown(arg1, arg2, arg3));
11724  #endif
11725  #ifdef TARGET_NR_setresuid32
11726      case TARGET_NR_setresuid32:
11727          return get_errno(sys_setresuid(arg1, arg2, arg3));
11728  #endif
11729  #ifdef TARGET_NR_getresuid32
11730      case TARGET_NR_getresuid32:
11731          {
11732              uid_t ruid, euid, suid;
11733              ret = get_errno(getresuid(&ruid, &euid, &suid));
11734              if (!is_error(ret)) {
11735                  if (put_user_u32(ruid, arg1)
11736                      || put_user_u32(euid, arg2)
11737                      || put_user_u32(suid, arg3))
11738                      return -TARGET_EFAULT;
11739              }
11740          }
11741          return ret;
11742  #endif
11743  #ifdef TARGET_NR_setresgid32
11744      case TARGET_NR_setresgid32:
11745          return get_errno(sys_setresgid(arg1, arg2, arg3));
11746  #endif
11747  #ifdef TARGET_NR_getresgid32
11748      case TARGET_NR_getresgid32:
11749          {
11750              gid_t rgid, egid, sgid;
11751              ret = get_errno(getresgid(&rgid, &egid, &sgid));
11752              if (!is_error(ret)) {
11753                  if (put_user_u32(rgid, arg1)
11754                      || put_user_u32(egid, arg2)
11755                      || put_user_u32(sgid, arg3))
11756                      return -TARGET_EFAULT;
11757              }
11758          }
11759          return ret;
11760  #endif
11761  #ifdef TARGET_NR_chown32
11762      case TARGET_NR_chown32:
11763          if (!(p = lock_user_string(arg1)))
11764              return -TARGET_EFAULT;
11765          ret = get_errno(chown(p, arg2, arg3));
11766          unlock_user(p, arg1, 0);
11767          return ret;
11768  #endif
11769  #ifdef TARGET_NR_setuid32
11770      case TARGET_NR_setuid32:
11771          return get_errno(sys_setuid(arg1));
11772  #endif
11773  #ifdef TARGET_NR_setgid32
11774      case TARGET_NR_setgid32:
11775          return get_errno(sys_setgid(arg1));
11776  #endif
11777  #ifdef TARGET_NR_setfsuid32
11778      case TARGET_NR_setfsuid32:
11779          return get_errno(setfsuid(arg1));
11780  #endif
11781  #ifdef TARGET_NR_setfsgid32
11782      case TARGET_NR_setfsgid32:
11783          return get_errno(setfsgid(arg1));
11784  #endif
11785  #ifdef TARGET_NR_mincore
11786      case TARGET_NR_mincore:
11787          {
11788              void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11789              if (!a) {
11790                  return -TARGET_ENOMEM;
11791              }
11792              p = lock_user_string(arg3);
11793              if (!p) {
11794                  ret = -TARGET_EFAULT;
11795              } else {
11796                  ret = get_errno(mincore(a, arg2, p));
11797                  unlock_user(p, arg3, ret);
11798              }
11799              unlock_user(a, arg1, 0);
11800          }
11801          return ret;
11802  #endif
11803  #ifdef TARGET_NR_arm_fadvise64_64
11804      case TARGET_NR_arm_fadvise64_64:
11805          /* arm_fadvise64_64 looks like fadvise64_64 but
11806           * with different argument order: fd, advice, offset, len
11807           * rather than the usual fd, offset, len, advice.
11808           * Note that offset and len are both 64-bit so appear as
11809           * pairs of 32-bit registers.
11810           */
11811          ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11812                              target_offset64(arg5, arg6), arg2);
11813          return -host_to_target_errno(ret);
11814  #endif
11815  
11816  #if TARGET_ABI_BITS == 32
11817  
11818  #ifdef TARGET_NR_fadvise64_64
11819      case TARGET_NR_fadvise64_64:
11820  #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11821          /* 6 args: fd, advice, offset (high, low), len (high, low) */
11822          ret = arg2;
11823          arg2 = arg3;
11824          arg3 = arg4;
11825          arg4 = arg5;
11826          arg5 = arg6;
11827          arg6 = ret;
11828  #else
11829          /* 6 args: fd, offset (high, low), len (high, low), advice */
11830          if (regpairs_aligned(cpu_env, num)) {
11831              /* offset is in (3,4), len in (5,6) and advice in 7 */
11832              arg2 = arg3;
11833              arg3 = arg4;
11834              arg4 = arg5;
11835              arg5 = arg6;
11836              arg6 = arg7;
11837          }
11838  #endif
11839          ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11840                              target_offset64(arg4, arg5), arg6);
11841          return -host_to_target_errno(ret);
11842  #endif
11843  
11844  #ifdef TARGET_NR_fadvise64
11845      case TARGET_NR_fadvise64:
11846          /* 5 args: fd, offset (high, low), len, advice */
11847          if (regpairs_aligned(cpu_env, num)) {
11848              /* offset is in (3,4), len in 5 and advice in 6 */
11849              arg2 = arg3;
11850              arg3 = arg4;
11851              arg4 = arg5;
11852              arg5 = arg6;
11853          }
11854          ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11855          return -host_to_target_errno(ret);
11856  #endif
11857  
11858  #else /* not a 32-bit ABI */
11859  #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11860  #ifdef TARGET_NR_fadvise64_64
11861      case TARGET_NR_fadvise64_64:
11862  #endif
11863  #ifdef TARGET_NR_fadvise64
11864      case TARGET_NR_fadvise64:
11865  #endif
11866  #ifdef TARGET_S390X
11867          switch (arg4) {
11868          case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11869          case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11870          case 6: arg4 = POSIX_FADV_DONTNEED; break;
11871          case 7: arg4 = POSIX_FADV_NOREUSE; break;
11872          default: break;
11873          }
11874  #endif
11875          return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11876  #endif
11877  #endif /* end of 64-bit ABI fadvise handling */
11878  
11879  #ifdef TARGET_NR_madvise
11880      case TARGET_NR_madvise:
11881          /* A straight passthrough may not be safe because qemu sometimes
11882             turns private file-backed mappings into anonymous mappings.
11883             This will break MADV_DONTNEED.
11884             This is a hint, so ignoring and returning success is ok.  */
11885          return 0;
11886  #endif
11887  #ifdef TARGET_NR_fcntl64
11888      case TARGET_NR_fcntl64:
11889      {
11890          int cmd;
11891          struct flock64 fl;
11892          from_flock64_fn *copyfrom = copy_from_user_flock64;
11893          to_flock64_fn *copyto = copy_to_user_flock64;
11894  
11895  #ifdef TARGET_ARM
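        /*
         * The ARM OABI lays out struct flock64 without the EABI alignment
         * padding before its 64-bit fields, hence the separate copy helpers.
         */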
11896          if (!((CPUARMState *)cpu_env)->eabi) {
11897              copyfrom = copy_from_user_oabi_flock64;
11898              copyto = copy_to_user_oabi_flock64;
11899          }
11900  #endif
11901  
11902          cmd = target_to_host_fcntl_cmd(arg2);
11903          if (cmd == -TARGET_EINVAL) {
11904              return cmd;
11905          }
11906  
11907          switch (arg2) {
11908          case TARGET_F_GETLK64:
11909              ret = copyfrom(&fl, arg3);
11910              if (ret) {
11911                  break;
11912              }
11913              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11914              if (ret == 0) {
11915                  ret = copyto(arg3, &fl);
11916              }
11917              break;
11918  
11919          case TARGET_F_SETLK64:
11920          case TARGET_F_SETLKW64:
11921              ret = copyfrom(&fl, arg3);
11922              if (ret) {
11923                  break;
11924              }
11925              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11926              break;
11927          default:
11928              ret = do_fcntl(arg1, arg2, arg3);
11929              break;
11930          }
11931          return ret;
11932      }
11933  #endif
11934  #ifdef TARGET_NR_cacheflush
11935      case TARGET_NR_cacheflush:
11936          /* self-modifying code is handled automatically, so nothing needed */
11937          return 0;
11938  #endif
11939  #ifdef TARGET_NR_getpagesize
11940      case TARGET_NR_getpagesize:
11941          return TARGET_PAGE_SIZE;
11942  #endif
11943      case TARGET_NR_gettid:
11944          return get_errno(sys_gettid());
11945  #ifdef TARGET_NR_readahead
11946      case TARGET_NR_readahead:
11947  #if TARGET_ABI_BITS == 32
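        /*
         * On 32-bit ABIs the 64-bit offset arrives as a register pair; some
         * ABIs align such pairs to even register numbers, which shifts the
         * remaining arguments up by one.
         */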
11948          if (regpairs_aligned(cpu_env, num)) {
11949              arg2 = arg3;
11950              arg3 = arg4;
11951              arg4 = arg5;
11952          }
11953          ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11954  #else
11955          ret = get_errno(readahead(arg1, arg2, arg3));
11956  #endif
11957          return ret;
11958  #endif
11959  #ifdef CONFIG_ATTR
11960  #ifdef TARGET_NR_setxattr
11961      case TARGET_NR_listxattr:
11962      case TARGET_NR_llistxattr:
11963      {
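        /*
         * If the guest passes a NULL buffer, hand NULL to the host so that
         * listxattr/llistxattr just return the required buffer size.
         */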
11964          void *p, *b = 0;
11965          if (arg2) {
11966              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11967              if (!b) {
11968                  return -TARGET_EFAULT;
11969              }
11970          }
11971          p = lock_user_string(arg1);
11972          if (p) {
11973              if (num == TARGET_NR_listxattr) {
11974                  ret = get_errno(listxattr(p, b, arg3));
11975              } else {
11976                  ret = get_errno(llistxattr(p, b, arg3));
11977              }
11978          } else {
11979              ret = -TARGET_EFAULT;
11980          }
11981          unlock_user(p, arg1, 0);
11982          unlock_user(b, arg2, arg3);
11983          return ret;
11984      }
11985      case TARGET_NR_flistxattr:
11986      {
11987          void *b = 0;
11988          if (arg2) {
11989              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11990              if (!b) {
11991                  return -TARGET_EFAULT;
11992              }
11993          }
11994          ret = get_errno(flistxattr(arg1, b, arg3));
11995          unlock_user(b, arg2, arg3);
11996          return ret;
11997      }
11998      case TARGET_NR_setxattr:
11999      case TARGET_NR_lsetxattr:
12000          {
12001              void *p, *n, *v = 0;
12002              if (arg3) {
12003                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12004                  if (!v) {
12005                      return -TARGET_EFAULT;
12006                  }
12007              }
12008              p = lock_user_string(arg1);
12009              n = lock_user_string(arg2);
12010              if (p && n) {
12011                  if (num == TARGET_NR_setxattr) {
12012                      ret = get_errno(setxattr(p, n, v, arg4, arg5));
12013                  } else {
12014                      ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12015                  }
12016              } else {
12017                  ret = -TARGET_EFAULT;
12018              }
12019              unlock_user(p, arg1, 0);
12020              unlock_user(n, arg2, 0);
12021              unlock_user(v, arg3, 0);
12022          }
12023          return ret;
12024      case TARGET_NR_fsetxattr:
12025          {
12026              void *n, *v = 0;
12027              if (arg3) {
12028                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12029                  if (!v) {
12030                      return -TARGET_EFAULT;
12031                  }
12032              }
12033              n = lock_user_string(arg2);
12034              if (n) {
12035                  ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12036              } else {
12037                  ret = -TARGET_EFAULT;
12038              }
12039              unlock_user(n, arg2, 0);
12040              unlock_user(v, arg3, 0);
12041          }
12042          return ret;
12043      case TARGET_NR_getxattr:
12044      case TARGET_NR_lgetxattr:
12045          {
12046              void *p, *n, *v = 0;
12047              if (arg3) {
12048                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12049                  if (!v) {
12050                      return -TARGET_EFAULT;
12051                  }
12052              }
12053              p = lock_user_string(arg1);
12054              n = lock_user_string(arg2);
12055              if (p && n) {
12056                  if (num == TARGET_NR_getxattr) {
12057                      ret = get_errno(getxattr(p, n, v, arg4));
12058                  } else {
12059                      ret = get_errno(lgetxattr(p, n, v, arg4));
12060                  }
12061              } else {
12062                  ret = -TARGET_EFAULT;
12063              }
12064              unlock_user(p, arg1, 0);
12065              unlock_user(n, arg2, 0);
12066              unlock_user(v, arg3, arg4);
12067          }
12068          return ret;
12069      case TARGET_NR_fgetxattr:
12070          {
12071              void *n, *v = 0;
12072              if (arg3) {
12073                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12074                  if (!v) {
12075                      return -TARGET_EFAULT;
12076                  }
12077              }
12078              n = lock_user_string(arg2);
12079              if (n) {
12080                  ret = get_errno(fgetxattr(arg1, n, v, arg4));
12081              } else {
12082                  ret = -TARGET_EFAULT;
12083              }
12084              unlock_user(n, arg2, 0);
12085              unlock_user(v, arg3, arg4);
12086          }
12087          return ret;
12088      case TARGET_NR_removexattr:
12089      case TARGET_NR_lremovexattr:
12090          {
12091              void *p, *n;
12092              p = lock_user_string(arg1);
12093              n = lock_user_string(arg2);
12094              if (p && n) {
12095                  if (num == TARGET_NR_removexattr) {
12096                      ret = get_errno(removexattr(p, n));
12097                  } else {
12098                      ret = get_errno(lremovexattr(p, n));
12099                  }
12100              } else {
12101                  ret = -TARGET_EFAULT;
12102              }
12103              unlock_user(p, arg1, 0);
12104              unlock_user(n, arg2, 0);
12105          }
12106          return ret;
12107      case TARGET_NR_fremovexattr:
12108          {
12109              void *n;
12110              n = lock_user_string(arg2);
12111              if (n) {
12112                  ret = get_errno(fremovexattr(arg1, n));
12113              } else {
12114                  ret = -TARGET_EFAULT;
12115              }
12116              unlock_user(n, arg2, 0);
12117          }
12118          return ret;
12119  #endif
12120  #endif /* CONFIG_ATTR */
12121  #ifdef TARGET_NR_set_thread_area
12122      case TARGET_NR_set_thread_area:
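        /*
         * Each target stores the TLS pointer in its own architecture-specific
         * place: a CP0 register on MIPS, PR_PID on CRIS, a GDT entry on
         * 32-bit x86, and the per-thread TaskState on m68k.
         */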
12123  #if defined(TARGET_MIPS)
12124          ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12125          return 0;
12126  #elif defined(TARGET_CRIS)
12127          if (arg1 & 0xff)
12128              ret = -TARGET_EINVAL;
12129          else {
12130              ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12131              ret = 0;
12132          }
12133          return ret;
12134  #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12135          return do_set_thread_area(cpu_env, arg1);
12136  #elif defined(TARGET_M68K)
12137          {
12138              TaskState *ts = cpu->opaque;
12139              ts->tp_value = arg1;
12140              return 0;
12141          }
12142  #else
12143          return -TARGET_ENOSYS;
12144  #endif
12145  #endif
12146  #ifdef TARGET_NR_get_thread_area
12147      case TARGET_NR_get_thread_area:
12148  #if defined(TARGET_I386) && defined(TARGET_ABI32)
12149          return do_get_thread_area(cpu_env, arg1);
12150  #elif defined(TARGET_M68K)
12151          {
12152              TaskState *ts = cpu->opaque;
12153              return ts->tp_value;
12154          }
12155  #else
12156          return -TARGET_ENOSYS;
12157  #endif
12158  #endif
12159  #ifdef TARGET_NR_getdomainname
12160      case TARGET_NR_getdomainname:
12161          return -TARGET_ENOSYS;
12162  #endif
12163  
12164  #ifdef TARGET_NR_clock_settime
12165      case TARGET_NR_clock_settime:
12166      {
12167          struct timespec ts;
12168  
12169          ret = target_to_host_timespec(&ts, arg2);
12170          if (!is_error(ret)) {
12171              ret = get_errno(clock_settime(arg1, &ts));
12172          }
12173          return ret;
12174      }
12175  #endif
12176  #ifdef TARGET_NR_clock_settime64
12177      case TARGET_NR_clock_settime64:
12178      {
12179          struct timespec ts;
12180  
12181          ret = target_to_host_timespec64(&ts, arg2);
12182          if (!is_error(ret)) {
12183              ret = get_errno(clock_settime(arg1, &ts));
12184          }
12185          return ret;
12186      }
12187  #endif
12188  #ifdef TARGET_NR_clock_gettime
12189      case TARGET_NR_clock_gettime:
12190      {
12191          struct timespec ts;
12192          ret = get_errno(clock_gettime(arg1, &ts));
12193          if (!is_error(ret)) {
12194              ret = host_to_target_timespec(arg2, &ts);
12195          }
12196          return ret;
12197      }
12198  #endif
12199  #ifdef TARGET_NR_clock_gettime64
12200      case TARGET_NR_clock_gettime64:
12201      {
12202          struct timespec ts;
12203          ret = get_errno(clock_gettime(arg1, &ts));
12204          if (!is_error(ret)) {
12205              ret = host_to_target_timespec64(arg2, &ts);
12206          }
12207          return ret;
12208      }
12209  #endif
12210  #ifdef TARGET_NR_clock_getres
12211      case TARGET_NR_clock_getres:
12212      {
12213          struct timespec ts;
12214          ret = get_errno(clock_getres(arg1, &ts));
12215          if (!is_error(ret)) {
12216              host_to_target_timespec(arg2, &ts);
12217          }
12218          return ret;
12219      }
12220  #endif
12221  #ifdef TARGET_NR_clock_getres_time64
12222      case TARGET_NR_clock_getres_time64:
12223      {
12224          struct timespec ts;
12225          ret = get_errno(clock_getres(arg1, &ts));
12226          if (!is_error(ret)) {
12227              host_to_target_timespec64(arg2, &ts);
12228          }
12229          return ret;
12230      }
12231  #endif
12232  #ifdef TARGET_NR_clock_nanosleep
12233      case TARGET_NR_clock_nanosleep:
12234      {
12235          struct timespec ts;
12236          if (target_to_host_timespec(&ts, arg3)) {
12237              return -TARGET_EFAULT;
12238          }
12239          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12240                                               &ts, arg4 ? &ts : NULL));
12241          /*
12242           * If the call is interrupted by a signal handler, it fails with
12243           * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12244           * the remaining unslept time is written back to arg4.
12245           */
12246          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12247              host_to_target_timespec(arg4, &ts)) {
12248                return -TARGET_EFAULT;
12249          }
12250  
12251          return ret;
12252      }
12253  #endif
12254  #ifdef TARGET_NR_clock_nanosleep_time64
12255      case TARGET_NR_clock_nanosleep_time64:
12256      {
12257          struct timespec ts;
12258  
12259          if (target_to_host_timespec64(&ts, arg3)) {
12260              return -TARGET_EFAULT;
12261          }
12262  
12263          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12264                                               &ts, arg4 ? &ts : NULL));
12265  
12266          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12267              host_to_target_timespec64(arg4, &ts)) {
12268              return -TARGET_EFAULT;
12269          }
12270          return ret;
12271      }
12272  #endif
12273  
12274  #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12275      case TARGET_NR_set_tid_address:
12276          return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12277  #endif
12278  
12279      case TARGET_NR_tkill:
12280          return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12281  
12282      case TARGET_NR_tgkill:
12283          return get_errno(safe_tgkill((int)arg1, (int)arg2,
12284                           target_to_host_signal(arg3)));
12285  
12286  #ifdef TARGET_NR_set_robust_list
12287      case TARGET_NR_set_robust_list:
12288      case TARGET_NR_get_robust_list:
12289          /* The ABI for supporting robust futexes has userspace pass
12290           * the kernel a pointer to a linked list which is updated by
12291           * userspace after the syscall; the list is walked by the kernel
12292           * when the thread exits. Since the linked list in QEMU guest
12293           * memory isn't a valid linked list for the host and we have
12294           * no way to reliably intercept the thread-death event, we can't
12295           * support these. Silently return ENOSYS so that guest userspace
12296           * falls back to a non-robust futex implementation (which should
12297           * be OK except in the corner case of the guest crashing while
12298           * holding a mutex that is shared with another process via
12299           * shared memory).
12300           */
12301          return -TARGET_ENOSYS;
12302  #endif
12303  
12304  #if defined(TARGET_NR_utimensat)
12305      case TARGET_NR_utimensat:
12306          {
12307              struct timespec *tsp, ts[2];
12308              if (!arg3) {
12309                  tsp = NULL;
12310              } else {
12311                  if (target_to_host_timespec(ts, arg3)) {
12312                      return -TARGET_EFAULT;
12313                  }
12314                  if (target_to_host_timespec(ts + 1, arg3 +
12315                                              sizeof(struct target_timespec))) {
12316                      return -TARGET_EFAULT;
12317                  }
12318                  tsp = ts;
12319              }
12320              if (!arg2)
12321                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12322              else {
12323                  if (!(p = lock_user_string(arg2))) {
12324                      return -TARGET_EFAULT;
12325                  }
12326                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12327                  unlock_user(p, arg2, 0);
12328              }
12329          }
12330          return ret;
12331  #endif
12332  #ifdef TARGET_NR_utimensat_time64
12333      case TARGET_NR_utimensat_time64:
12334          {
12335              struct timespec *tsp, ts[2];
12336              if (!arg3) {
12337                  tsp = NULL;
12338              } else {
12339                  if (target_to_host_timespec64(ts, arg3)) {
12340                      return -TARGET_EFAULT;
12341                  }
12342                  if (target_to_host_timespec64(ts + 1, arg3 +
12343                                       sizeof(struct target__kernel_timespec))) {
12344                      return -TARGET_EFAULT;
12345                  }
12346                  tsp = ts;
12347              }
12348              if (!arg2)
12349                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12350              else {
12351                  p = lock_user_string(arg2);
12352                  if (!p) {
12353                      return -TARGET_EFAULT;
12354                  }
12355                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12356                  unlock_user(p, arg2, 0);
12357              }
12358          }
12359          return ret;
12360  #endif
12361  #ifdef TARGET_NR_futex
12362      case TARGET_NR_futex:
12363          return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12364  #endif
12365  #ifdef TARGET_NR_futex_time64
12366      case TARGET_NR_futex_time64:
12367          return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12368  #endif
12369  #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12370      case TARGET_NR_inotify_init:
12371          ret = get_errno(sys_inotify_init());
12372          if (ret >= 0) {
12373              fd_trans_register(ret, &target_inotify_trans);
12374          }
12375          return ret;
12376  #endif
12377  #ifdef CONFIG_INOTIFY1
12378  #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12379      case TARGET_NR_inotify_init1:
12380          ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12381                                            fcntl_flags_tbl)));
12382          if (ret >= 0) {
12383              fd_trans_register(ret, &target_inotify_trans);
12384          }
12385          return ret;
12386  #endif
12387  #endif
12388  #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12389      case TARGET_NR_inotify_add_watch:
12390          if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
12391          ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12392          unlock_user(p, arg2, 0);
12393          return ret;
12394  #endif
12395  #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12396      case TARGET_NR_inotify_rm_watch:
12397          return get_errno(sys_inotify_rm_watch(arg1, arg2));
12398  #endif
12399  
12400  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12401      case TARGET_NR_mq_open:
12402          {
12403              struct mq_attr posix_mq_attr;
12404              struct mq_attr *pposix_mq_attr;
12405              int host_flags;
12406  
12407              host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12408              pposix_mq_attr = NULL;
12409              if (arg4) {
12410                  if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12411                      return -TARGET_EFAULT;
12412                  }
12413                  pposix_mq_attr = &posix_mq_attr;
12414              }
12415              p = lock_user_string(arg1 - 1);
12416              if (!p) {
12417                  return -TARGET_EFAULT;
12418              }
12419              ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12420              unlock_user(p, arg1, 0);
12421          }
12422          return ret;
12423  
12424      case TARGET_NR_mq_unlink:
12425          p = lock_user_string(arg1 - 1);
12426          if (!p) {
12427              return -TARGET_EFAULT;
12428          }
12429          ret = get_errno(mq_unlink(p));
12430          unlock_user(p, arg1, 0);
12431          return ret;
12432  
12433  #ifdef TARGET_NR_mq_timedsend
12434      case TARGET_NR_mq_timedsend:
12435          {
12436              struct timespec ts;
12437  
12438              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12439              if (arg5 != 0) {
12440                  if (target_to_host_timespec(&ts, arg5)) {
12441                      return -TARGET_EFAULT;
12442                  }
12443                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12444                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12445                      return -TARGET_EFAULT;
12446                  }
12447              } else {
12448                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12449              }
12450              unlock_user(p, arg2, arg3);
12451          }
12452          return ret;
12453  #endif
12454  #ifdef TARGET_NR_mq_timedsend_time64
12455      case TARGET_NR_mq_timedsend_time64:
12456          {
12457              struct timespec ts;
12458  
12459              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12460              if (arg5 != 0) {
12461                  if (target_to_host_timespec64(&ts, arg5)) {
12462                      return -TARGET_EFAULT;
12463                  }
12464                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12465                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12466                      return -TARGET_EFAULT;
12467                  }
12468              } else {
12469                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12470              }
12471              unlock_user(p, arg2, arg3);
12472          }
12473          return ret;
12474  #endif
12475  
12476  #ifdef TARGET_NR_mq_timedreceive
12477      case TARGET_NR_mq_timedreceive:
12478          {
12479              struct timespec ts;
12480              unsigned int prio;
12481  
12482              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12483              if (arg5 != 0) {
12484                  if (target_to_host_timespec(&ts, arg5)) {
12485                      return -TARGET_EFAULT;
12486                  }
12487                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12488                                                       &prio, &ts));
12489                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12490                      return -TARGET_EFAULT;
12491                  }
12492              } else {
12493                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12494                                                       &prio, NULL));
12495              }
12496              unlock_user(p, arg2, arg3);
12497              if (arg4 != 0)
12498                  put_user_u32(prio, arg4);
12499          }
12500          return ret;
12501  #endif
12502  #ifdef TARGET_NR_mq_timedreceive_time64
12503      case TARGET_NR_mq_timedreceive_time64:
12504          {
12505              struct timespec ts;
12506              unsigned int prio;
12507  
12508              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12509              if (arg5 != 0) {
12510                  if (target_to_host_timespec64(&ts, arg5)) {
12511                      return -TARGET_EFAULT;
12512                  }
12513                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12514                                                       &prio, &ts));
12515                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12516                      return -TARGET_EFAULT;
12517                  }
12518              } else {
12519                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12520                                                       &prio, NULL));
12521              }
12522              unlock_user(p, arg2, arg3);
12523              if (arg4 != 0) {
12524                  put_user_u32(prio, arg4);
12525              }
12526          }
12527          return ret;
12528  #endif
12529  
12530      /* Not implemented for now... */
12531  /*     case TARGET_NR_mq_notify: */
12532  /*         break; */
12533  
12534      case TARGET_NR_mq_getsetattr:
12535          {
12536              struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12537              ret = 0;
12538              if (arg2 != 0) {
12539                  copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12540                  ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12541                                             &posix_mq_attr_out));
12542              } else if (arg3 != 0) {
12543                  ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12544              }
12545              if (ret == 0 && arg3 != 0) {
12546                  copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12547              }
12548          }
12549          return ret;
12550  #endif
12551  
12552  #ifdef CONFIG_SPLICE
12553  #ifdef TARGET_NR_tee
12554      case TARGET_NR_tee:
12555          {
12556              ret = get_errno(tee(arg1, arg2, arg3, arg4));
12557          }
12558          return ret;
12559  #endif
12560  #ifdef TARGET_NR_splice
12561      case TARGET_NR_splice:
12562          {
12563              loff_t loff_in, loff_out;
12564              loff_t *ploff_in = NULL, *ploff_out = NULL;
12565              if (arg2) {
12566                  if (get_user_u64(loff_in, arg2)) {
12567                      return -TARGET_EFAULT;
12568                  }
12569                  ploff_in = &loff_in;
12570              }
12571              if (arg4) {
12572                  if (get_user_u64(loff_out, arg4)) {
12573                      return -TARGET_EFAULT;
12574                  }
12575                  ploff_out = &loff_out;
12576              }
12577              ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12578              if (arg2) {
12579                  if (put_user_u64(loff_in, arg2)) {
12580                      return -TARGET_EFAULT;
12581                  }
12582              }
12583              if (arg4) {
12584                  if (put_user_u64(loff_out, arg4)) {
12585                      return -TARGET_EFAULT;
12586                  }
12587              }
12588          }
12589          return ret;
12590  #endif
12591  #ifdef TARGET_NR_vmsplice
12592      case TARGET_NR_vmsplice:
12593          {
12594              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12595              if (vec != NULL) {
12596                  ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12597                  unlock_iovec(vec, arg2, arg3, 0);
12598              } else {
12599                  ret = -host_to_target_errno(errno);
12600              }
12601          }
12602          return ret;
12603  #endif
12604  #endif /* CONFIG_SPLICE */
12605  #ifdef CONFIG_EVENTFD
12606  #if defined(TARGET_NR_eventfd)
12607      case TARGET_NR_eventfd:
12608          ret = get_errno(eventfd(arg1, 0));
12609          if (ret >= 0) {
12610              fd_trans_register(ret, &target_eventfd_trans);
12611          }
12612          return ret;
12613  #endif
12614  #if defined(TARGET_NR_eventfd2)
12615      case TARGET_NR_eventfd2:
12616      {
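        /*
         * EFD_NONBLOCK/EFD_CLOEXEC share their values with O_NONBLOCK and
         * O_CLOEXEC, so translate the guest's O_* bits to the host's.
         */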
12617          int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12618          if (arg2 & TARGET_O_NONBLOCK) {
12619              host_flags |= O_NONBLOCK;
12620          }
12621          if (arg2 & TARGET_O_CLOEXEC) {
12622              host_flags |= O_CLOEXEC;
12623          }
12624          ret = get_errno(eventfd(arg1, host_flags));
12625          if (ret >= 0) {
12626              fd_trans_register(ret, &target_eventfd_trans);
12627          }
12628          return ret;
12629      }
12630  #endif
12631  #endif /* CONFIG_EVENTFD  */
12632  #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12633      case TARGET_NR_fallocate:
12634  #if TARGET_ABI_BITS == 32
12635          ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12636                                    target_offset64(arg5, arg6)));
12637  #else
12638          ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12639  #endif
12640          return ret;
12641  #endif
12642  #if defined(CONFIG_SYNC_FILE_RANGE)
12643  #if defined(TARGET_NR_sync_file_range)
12644      case TARGET_NR_sync_file_range:
12645  #if TARGET_ABI_BITS == 32
12646  #if defined(TARGET_MIPS)
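        /*
         * MIPS aligns 64-bit register pairs, so the offset/nbytes pairs
         * arrive in arg3/arg4 and arg5/arg6 with the flags in arg7.
         */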
12647          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12648                                          target_offset64(arg5, arg6), arg7));
12649  #else
12650          ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12651                                          target_offset64(arg4, arg5), arg6));
12652  #endif /* !TARGET_MIPS */
12653  #else
12654          ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12655  #endif
12656          return ret;
12657  #endif
12658  #if defined(TARGET_NR_sync_file_range2) || \
12659      defined(TARGET_NR_arm_sync_file_range)
12660  #if defined(TARGET_NR_sync_file_range2)
12661      case TARGET_NR_sync_file_range2:
12662  #endif
12663  #if defined(TARGET_NR_arm_sync_file_range)
12664      case TARGET_NR_arm_sync_file_range:
12665  #endif
12666          /* This is like sync_file_range but the arguments are reordered */
12667  #if TARGET_ABI_BITS == 32
12668          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12669                                          target_offset64(arg5, arg6), arg2));
12670  #else
12671          ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12672  #endif
12673          return ret;
12674  #endif
12675  #endif
12676  #if defined(TARGET_NR_signalfd4)
12677      case TARGET_NR_signalfd4:
12678          return do_signalfd4(arg1, arg2, arg4);
12679  #endif
12680  #if defined(TARGET_NR_signalfd)
12681      case TARGET_NR_signalfd:
12682          return do_signalfd4(arg1, arg2, 0);
12683  #endif
12684  #if defined(CONFIG_EPOLL)
12685  #if defined(TARGET_NR_epoll_create)
12686      case TARGET_NR_epoll_create:
12687          return get_errno(epoll_create(arg1));
12688  #endif
12689  #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12690      case TARGET_NR_epoll_create1:
12691          return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12692  #endif
12693  #if defined(TARGET_NR_epoll_ctl)
12694      case TARGET_NR_epoll_ctl:
12695      {
12696          struct epoll_event ep;
12697          struct epoll_event *epp = 0;
12698          if (arg4) {
12699              if (arg2 != EPOLL_CTL_DEL) {
12700                  struct target_epoll_event *target_ep;
12701                  if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12702                      return -TARGET_EFAULT;
12703                  }
12704                  ep.events = tswap32(target_ep->events);
12705                  /*
12706                   * The epoll_data_t union is just opaque data to the kernel,
12707                   * so we transfer all 64 bits across and need not worry what
12708                   * actual data type it is.
12709                   */
12710                  ep.data.u64 = tswap64(target_ep->data.u64);
12711                  unlock_user_struct(target_ep, arg4, 0);
12712              }
12713              /*
12714               * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12715               * non-null pointer, even though this argument is ignored.
12716               *
12717               */
12718              epp = &ep;
12719          }
12720          return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12721      }
12722  #endif
12723  
12724  #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12725  #if defined(TARGET_NR_epoll_wait)
12726      case TARGET_NR_epoll_wait:
12727  #endif
12728  #if defined(TARGET_NR_epoll_pwait)
12729      case TARGET_NR_epoll_pwait:
12730  #endif
12731      {
12732          struct target_epoll_event *target_ep;
12733          struct epoll_event *ep;
12734          int epfd = arg1;
12735          int maxevents = arg3;
12736          int timeout = arg4;
12737  
12738          if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12739              return -TARGET_EINVAL;
12740          }
12741  
12742          target_ep = lock_user(VERIFY_WRITE, arg2,
12743                                maxevents * sizeof(struct target_epoll_event), 1);
12744          if (!target_ep) {
12745              return -TARGET_EFAULT;
12746          }
12747  
12748          ep = g_try_new(struct epoll_event, maxevents);
12749          if (!ep) {
12750              unlock_user(target_ep, arg2, 0);
12751              return -TARGET_ENOMEM;
12752          }
12753  
12754          switch (num) {
12755  #if defined(TARGET_NR_epoll_pwait)
12756          case TARGET_NR_epoll_pwait:
12757          {
12758              target_sigset_t *target_set;
12759              sigset_t _set, *set = &_set;
12760  
12761              if (arg5) {
12762                  if (arg6 != sizeof(target_sigset_t)) {
12763                      ret = -TARGET_EINVAL;
12764                      break;
12765                  }
12766  
12767                  target_set = lock_user(VERIFY_READ, arg5,
12768                                         sizeof(target_sigset_t), 1);
12769                  if (!target_set) {
12770                      ret = -TARGET_EFAULT;
12771                      break;
12772                  }
12773                  target_to_host_sigset(set, target_set);
12774                  unlock_user(target_set, arg5, 0);
12775              } else {
12776                  set = NULL;
12777              }
12778  
12779              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12780                                               set, SIGSET_T_SIZE));
12781              break;
12782          }
12783  #endif
12784  #if defined(TARGET_NR_epoll_wait)
12785          case TARGET_NR_epoll_wait:
12786              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12787                                               NULL, 0));
12788              break;
12789  #endif
12790          default:
12791              ret = -TARGET_ENOSYS;
12792          }
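        /*
         * On success ret is the number of ready events; convert each one back
         * to guest byte order before writing the array out.
         */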
12793          if (!is_error(ret)) {
12794              int i;
12795              for (i = 0; i < ret; i++) {
12796                  target_ep[i].events = tswap32(ep[i].events);
12797                  target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12798              }
12799              unlock_user(target_ep, arg2,
12800                          ret * sizeof(struct target_epoll_event));
12801          } else {
12802              unlock_user(target_ep, arg2, 0);
12803          }
12804          g_free(ep);
12805          return ret;
12806      }
12807  #endif
12808  #endif
12809  #ifdef TARGET_NR_prlimit64
12810      case TARGET_NR_prlimit64:
12811      {
12812          /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12813          struct target_rlimit64 *target_rnew, *target_rold;
12814          struct host_rlimit64 rnew, rold, *rnewp = 0;
12815          int resource = target_to_host_resource(arg2);
12816  
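        /*
         * New limits for RLIMIT_AS/DATA/STACK are deliberately not forwarded,
         * presumably because applying the guest's values to the host process
         * could break QEMU itself; the old limits are still read back below.
         */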
12817          if (arg3 && (resource != RLIMIT_AS &&
12818                       resource != RLIMIT_DATA &&
12819                       resource != RLIMIT_STACK)) {
12820              if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12821                  return -TARGET_EFAULT;
12822              }
12823              rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12824              rnew.rlim_max = tswap64(target_rnew->rlim_max);
12825              unlock_user_struct(target_rnew, arg3, 0);
12826              rnewp = &rnew;
12827          }
12828  
12829          ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12830          if (!is_error(ret) && arg4) {
12831              if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12832                  return -TARGET_EFAULT;
12833              }
12834              target_rold->rlim_cur = tswap64(rold.rlim_cur);
12835              target_rold->rlim_max = tswap64(rold.rlim_max);
12836              unlock_user_struct(target_rold, arg4, 1);
12837          }
12838          return ret;
12839      }
12840  #endif
12841  #ifdef TARGET_NR_gethostname
12842      case TARGET_NR_gethostname:
12843      {
12844          char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12845          if (name) {
12846              ret = get_errno(gethostname(name, arg2));
12847              unlock_user(name, arg1, arg2);
12848          } else {
12849              ret = -TARGET_EFAULT;
12850          }
12851          return ret;
12852      }
12853  #endif
12854  #ifdef TARGET_NR_atomic_cmpxchg_32
12855      case TARGET_NR_atomic_cmpxchg_32:
12856      {
12857          /* should use start_exclusive from main.c */
12858          abi_ulong mem_value;
12859          if (get_user_u32(mem_value, arg6)) {
12860              target_siginfo_t info;
12861              info.si_signo = SIGSEGV;
12862              info.si_errno = 0;
12863              info.si_code = TARGET_SEGV_MAPERR;
12864              info._sifields._sigfault._addr = arg6;
12865              queue_signal((CPUArchState *)cpu_env, info.si_signo,
12866                           QEMU_SI_FAULT, &info);
12867              ret = 0xdeadbeef;
12868  
12869          }
12870          if (mem_value == arg2)
12871              put_user_u32(arg1, arg6);
12872          return mem_value;
12873      }
12874  #endif
12875  #ifdef TARGET_NR_atomic_barrier
12876      case TARGET_NR_atomic_barrier:
12877          /* Like the kernel implementation and the
12878             qemu arm barrier, no-op this? */
12879          return 0;
12880  #endif
12881  
12882  #ifdef TARGET_NR_timer_create
12883      case TARGET_NR_timer_create:
12884      {
12885          /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12886  
12887          struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12888  
12889          int clkid = arg1;
12890          int timer_index = next_free_host_timer();
12891  
12892          if (timer_index < 0) {
12893              ret = -TARGET_EAGAIN;
12894          } else {
12895              timer_t *phtimer = g_posix_timers + timer_index;
12896  
12897              if (arg2) {
12898                  phost_sevp = &host_sevp;
12899                  ret = target_to_host_sigevent(phost_sevp, arg2);
12900                  if (ret != 0) {
12901                      return ret;
12902                  }
12903              }
12904  
12905              ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12906              if (ret) {
12907                  phtimer = NULL;
12908              } else {
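                /*
                 * The value handed back to the guest encodes the host timer
                 * table index together with TIMER_MAGIC so that get_timer_id()
                 * can validate it on later timer_* calls.
                 */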
12909                  if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12910                      return -TARGET_EFAULT;
12911                  }
12912              }
12913          }
12914          return ret;
12915      }
12916  #endif
12917  
12918  #ifdef TARGET_NR_timer_settime
12919      case TARGET_NR_timer_settime:
12920      {
12921          /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12922           * struct itimerspec * old_value */
12923          target_timer_t timerid = get_timer_id(arg1);
12924  
12925          if (timerid < 0) {
12926              ret = timerid;
12927          } else if (arg3 == 0) {
12928              ret = -TARGET_EINVAL;
12929          } else {
12930              timer_t htimer = g_posix_timers[timerid];
12931              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12932  
12933              if (target_to_host_itimerspec(&hspec_new, arg3)) {
12934                  return -TARGET_EFAULT;
12935              }
12936              ret = get_errno(
12937                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12938              if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12939                  return -TARGET_EFAULT;
12940              }
12941          }
12942          return ret;
12943      }
12944  #endif
12945  
12946  #ifdef TARGET_NR_timer_settime64
12947      case TARGET_NR_timer_settime64:
12948      {
12949          target_timer_t timerid = get_timer_id(arg1);
12950  
12951          if (timerid < 0) {
12952              ret = timerid;
12953          } else if (arg3 == 0) {
12954              ret = -TARGET_EINVAL;
12955          } else {
12956              timer_t htimer = g_posix_timers[timerid];
12957              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12958  
12959              if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12960                  return -TARGET_EFAULT;
12961              }
12962              ret = get_errno(
12963                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12964              if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12965                  return -TARGET_EFAULT;
12966              }
12967          }
12968          return ret;
12969      }
12970  #endif
12971  
12972  #ifdef TARGET_NR_timer_gettime
12973      case TARGET_NR_timer_gettime:
12974      {
12975          /* args: timer_t timerid, struct itimerspec *curr_value */
12976          target_timer_t timerid = get_timer_id(arg1);
12977  
12978          if (timerid < 0) {
12979              ret = timerid;
12980          } else if (!arg2) {
12981              ret = -TARGET_EFAULT;
12982          } else {
12983              timer_t htimer = g_posix_timers[timerid];
12984              struct itimerspec hspec;
12985              ret = get_errno(timer_gettime(htimer, &hspec));
12986  
12987              if (host_to_target_itimerspec(arg2, &hspec)) {
12988                  ret = -TARGET_EFAULT;
12989              }
12990          }
12991          return ret;
12992      }
12993  #endif
12994  
12995  #ifdef TARGET_NR_timer_gettime64
12996      case TARGET_NR_timer_gettime64:
12997      {
12998          /* args: timer_t timerid, struct itimerspec64 *curr_value */
12999          target_timer_t timerid = get_timer_id(arg1);
13000  
13001          if (timerid < 0) {
13002              ret = timerid;
13003          } else if (!arg2) {
13004              ret = -TARGET_EFAULT;
13005          } else {
13006              timer_t htimer = g_posix_timers[timerid];
13007              struct itimerspec hspec;
13008              ret = get_errno(timer_gettime(htimer, &hspec));
13009  
13010              if (host_to_target_itimerspec64(arg2, &hspec)) {
13011                  ret = -TARGET_EFAULT;
13012              }
13013          }
13014          return ret;
13015      }
13016  #endif
13017  
13018  #ifdef TARGET_NR_timer_getoverrun
13019      case TARGET_NR_timer_getoverrun:
13020      {
13021          /* args: timer_t timerid */
13022          target_timer_t timerid = get_timer_id(arg1);
13023  
13024          if (timerid < 0) {
13025              ret = timerid;
13026          } else {
13027              timer_t htimer = g_posix_timers[timerid];
13028              ret = get_errno(timer_getoverrun(htimer));
13029          }
13030          return ret;
13031      }
13032  #endif
13033  
13034  #ifdef TARGET_NR_timer_delete
13035      case TARGET_NR_timer_delete:
13036      {
13037          /* args: timer_t timerid */
13038          target_timer_t timerid = get_timer_id(arg1);
13039  
13040          if (timerid < 0) {
13041              ret = timerid;
13042          } else {
13043              timer_t htimer = g_posix_timers[timerid];
13044              ret = get_errno(timer_delete(htimer));
13045              g_posix_timers[timerid] = 0;
13046          }
13047          return ret;
13048      }
13049  #endif
13050  
13051  #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13052      case TARGET_NR_timerfd_create:
13053          return get_errno(timerfd_create(arg1,
13054                            target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13055  #endif
13056  
13057  #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13058      case TARGET_NR_timerfd_gettime:
13059          {
13060              struct itimerspec its_curr;
13061  
13062              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13063  
13064              if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13065                  return -TARGET_EFAULT;
13066              }
13067          }
13068          return ret;
13069  #endif
13070  
13071  #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13072      case TARGET_NR_timerfd_gettime64:
13073          {
13074              struct itimerspec its_curr;
13075  
13076              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13077  
13078              if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13079                  return -TARGET_EFAULT;
13080              }
13081          }
13082          return ret;
13083  #endif
13084  
13085  #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13086      case TARGET_NR_timerfd_settime:
13087          {
13088              struct itimerspec its_new, its_old, *p_new;
13089  
13090              if (arg3) {
13091                  if (target_to_host_itimerspec(&its_new, arg3)) {
13092                      return -TARGET_EFAULT;
13093                  }
13094                  p_new = &its_new;
13095              } else {
13096                  p_new = NULL;
13097              }
13098  
13099              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13100  
13101              if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13102                  return -TARGET_EFAULT;
13103              }
13104          }
13105          return ret;
13106  #endif
13107  
13108  #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13109      case TARGET_NR_timerfd_settime64:
13110          {
13111              struct itimerspec its_new, its_old, *p_new;
13112  
13113              if (arg3) {
13114                  if (target_to_host_itimerspec64(&its_new, arg3)) {
13115                      return -TARGET_EFAULT;
13116                  }
13117                  p_new = &its_new;
13118              } else {
13119                  p_new = NULL;
13120              }
13121  
13122              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13123  
13124              if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13125                  return -TARGET_EFAULT;
13126              }
13127          }
13128          return ret;
13129  #endif
13130  
13131  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13132      case TARGET_NR_ioprio_get:
13133          return get_errno(ioprio_get(arg1, arg2));
13134  #endif
13135  
13136  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13137      case TARGET_NR_ioprio_set:
13138          return get_errno(ioprio_set(arg1, arg2, arg3));
13139  #endif
13140  
13141  #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13142      case TARGET_NR_setns:
13143          return get_errno(setns(arg1, arg2));
13144  #endif
13145  #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13146      case TARGET_NR_unshare:
13147          return get_errno(unshare(arg1));
13148  #endif
13149  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13150      case TARGET_NR_kcmp:
13151          return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13152  #endif
13153  #ifdef TARGET_NR_swapcontext
13154      case TARGET_NR_swapcontext:
13155          /* PowerPC specific.  */
13156          return do_swapcontext(cpu_env, arg1, arg2, arg3);
13157  #endif
13158  #ifdef TARGET_NR_memfd_create
13159      case TARGET_NR_memfd_create:
13160          p = lock_user_string(arg1);
13161          if (!p) {
13162              return -TARGET_EFAULT;
13163          }
13164          ret = get_errno(memfd_create(p, arg2));
13165          fd_trans_unregister(ret);
13166          unlock_user(p, arg1, 0);
13167          return ret;
13168  #endif
13169  #if defined TARGET_NR_membarrier && defined __NR_membarrier
13170      case TARGET_NR_membarrier:
13171          return get_errno(membarrier(arg1, arg2));
13172  #endif
13173  
13174  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13175      case TARGET_NR_copy_file_range:
13176          {
13177              loff_t inoff, outoff;
13178              loff_t *pinoff = NULL, *poutoff = NULL;
13179  
13180              if (arg2) {
13181                  if (get_user_u64(inoff, arg2)) {
13182                      return -TARGET_EFAULT;
13183                  }
13184                  pinoff = &inoff;
13185              }
13186              if (arg4) {
13187                  if (get_user_u64(outoff, arg4)) {
13188                      return -TARGET_EFAULT;
13189                  }
13190                  poutoff = &outoff;
13191              }
13192              /* Do not sign-extend the count parameter. */
13193              ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13194                                                   (abi_ulong)arg5, arg6));
13195              if (!is_error(ret) && ret > 0) {
13196                  if (arg2) {
13197                      if (put_user_u64(inoff, arg2)) {
13198                          return -TARGET_EFAULT;
13199                      }
13200                  }
13201                  if (arg4) {
13202                      if (put_user_u64(outoff, arg4)) {
13203                          return -TARGET_EFAULT;
13204                      }
13205                  }
13206              }
13207          }
13208          return ret;
13209  #endif
13210  
13211      default:
13212          qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13213          return -TARGET_ENOSYS;
13214      }
13215      return ret;
13216  }
13217  
13218  abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13219                      abi_long arg2, abi_long arg3, abi_long arg4,
13220                      abi_long arg5, abi_long arg6, abi_long arg7,
13221                      abi_long arg8)
13222  {
13223      CPUState *cpu = env_cpu(cpu_env);
13224      abi_long ret;
13225  
13226  #ifdef DEBUG_ERESTARTSYS
13227      /* Debug-only code for exercising the syscall-restart code paths
13228       * in the per-architecture cpu main loops: restart every syscall
13229       * the guest makes once before letting it through.
13230       */
13231      {
13232          static bool flag;
13233          flag = !flag;
13234          if (flag) {
13235              return -TARGET_ERESTARTSYS;
13236          }
13237      }
13238  #endif
13239  
13240      record_syscall_start(cpu, num, arg1,
13241                           arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13242  
13243      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13244          print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13245      }
13246  
13247      ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13248                        arg5, arg6, arg7, arg8);
13249  
13250      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13251          print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13252                            arg3, arg4, arg5, arg6);
13253      }
13254  
13255      record_syscall_return(cpu, num, ret);
13256      return ret;
13257  }
13258