xref: /openbmc/qemu/linux-user/syscall.c (revision 8417e1378cadb8928c24755a95ff267def53922f)
1  /*
2   *  Linux syscalls
3   *
4   *  Copyright (c) 2003 Fabrice Bellard
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
8   *  the Free Software Foundation; either version 2 of the License, or
9   *  (at your option) any later version.
10   *
11   *  This program is distributed in the hope that it will be useful,
12   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   *  GNU General Public License for more details.
15   *
16   *  You should have received a copy of the GNU General Public License
17   *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18   */
19  #define _ATFILE_SOURCE
20  #include "qemu/osdep.h"
21  #include "qemu/cutils.h"
22  #include "qemu/path.h"
23  #include "qemu/memfd.h"
24  #include "qemu/queue.h"
25  #include <elf.h>
26  #include <endian.h>
27  #include <grp.h>
28  #include <sys/ipc.h>
29  #include <sys/msg.h>
30  #include <sys/wait.h>
31  #include <sys/mount.h>
32  #include <sys/file.h>
33  #include <sys/fsuid.h>
34  #include <sys/personality.h>
35  #include <sys/prctl.h>
36  #include <sys/resource.h>
37  #include <sys/swap.h>
38  #include <linux/capability.h>
39  #include <sched.h>
40  #include <sys/timex.h>
41  #include <sys/socket.h>
42  #include <linux/sockios.h>
43  #include <sys/un.h>
44  #include <sys/uio.h>
45  #include <poll.h>
46  #include <sys/times.h>
47  #include <sys/shm.h>
48  #include <sys/sem.h>
49  #include <sys/statfs.h>
50  #include <utime.h>
51  #include <sys/sysinfo.h>
52  #include <sys/signalfd.h>
53  //#include <sys/user.h>
54  #include <netinet/in.h>
55  #include <netinet/ip.h>
56  #include <netinet/tcp.h>
57  #include <netinet/udp.h>
58  #include <linux/wireless.h>
59  #include <linux/icmp.h>
60  #include <linux/icmpv6.h>
61  #include <linux/if_tun.h>
62  #include <linux/in6.h>
63  #include <linux/errqueue.h>
64  #include <linux/random.h>
65  #ifdef CONFIG_TIMERFD
66  #include <sys/timerfd.h>
67  #endif
68  #ifdef CONFIG_EVENTFD
69  #include <sys/eventfd.h>
70  #endif
71  #ifdef CONFIG_EPOLL
72  #include <sys/epoll.h>
73  #endif
74  #ifdef CONFIG_ATTR
75  #include "qemu/xattr.h"
76  #endif
77  #ifdef CONFIG_SENDFILE
78  #include <sys/sendfile.h>
79  #endif
80  #ifdef HAVE_SYS_KCOV_H
81  #include <sys/kcov.h>
82  #endif
83  
84  #define termios host_termios
85  #define winsize host_winsize
86  #define termio host_termio
87  #define sgttyb host_sgttyb /* same as target */
88  #define tchars host_tchars /* same as target */
89  #define ltchars host_ltchars /* same as target */
90  
91  #include <linux/termios.h>
92  #include <linux/unistd.h>
93  #include <linux/cdrom.h>
94  #include <linux/hdreg.h>
95  #include <linux/soundcard.h>
96  #include <linux/kd.h>
97  #include <linux/mtio.h>
98  #include <linux/fs.h>
99  #include <linux/fd.h>
100  #if defined(CONFIG_FIEMAP)
101  #include <linux/fiemap.h>
102  #endif
103  #include <linux/fb.h>
104  #if defined(CONFIG_USBFS)
105  #include <linux/usbdevice_fs.h>
106  #include <linux/usb/ch9.h>
107  #endif
108  #include <linux/vt.h>
109  #include <linux/dm-ioctl.h>
110  #include <linux/reboot.h>
111  #include <linux/route.h>
112  #include <linux/filter.h>
113  #include <linux/blkpg.h>
114  #include <netpacket/packet.h>
115  #include <linux/netlink.h>
116  #include <linux/if_alg.h>
117  #include <linux/rtc.h>
118  #include <sound/asound.h>
119  #ifdef HAVE_BTRFS_H
120  #include <linux/btrfs.h>
121  #endif
122  #ifdef HAVE_DRM_H
123  #include <libdrm/drm.h>
124  #include <libdrm/i915_drm.h>
125  #endif
126  #include "linux_loop.h"
127  #include "uname.h"
128  
129  #include "qemu.h"
130  #include "qemu/guest-random.h"
131  #include "qemu/selfmap.h"
132  #include "user/syscall-trace.h"
133  #include "qapi/error.h"
134  #include "fd-trans.h"
135  #include "tcg/tcg.h"
136  
137  #ifndef CLONE_IO
138  #define CLONE_IO                0x80000000      /* Clone io context */
139  #endif
140  
141  /* We can't directly call the host clone syscall, because this will
142   * badly confuse libc (breaking mutexes, for example). So we must
143   * divide clone flags into:
144   *  * flag combinations that look like pthread_create()
145   *  * flag combinations that look like fork()
146   *  * flags we can implement within QEMU itself
147   *  * flags we can't support and will return an error for
148   */
149  /* For thread creation, all these flags must be present; for
150   * fork, none must be present.
151   */
152  #define CLONE_THREAD_FLAGS                              \
153      (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154       CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155  
156  /* These flags are ignored:
157   * CLONE_DETACHED is now ignored by the kernel;
158   * CLONE_IO is just an optimisation hint to the I/O scheduler
159   */
160  #define CLONE_IGNORED_FLAGS                     \
161      (CLONE_DETACHED | CLONE_IO)
162  
163  /* Flags for fork which we can implement within QEMU itself */
164  #define CLONE_OPTIONAL_FORK_FLAGS               \
165      (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167  
168  /* Flags for thread creation which we can implement within QEMU itself */
169  #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170      (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172  
173  #define CLONE_INVALID_FORK_FLAGS                                        \
174      (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175  
176  #define CLONE_INVALID_THREAD_FLAGS                                      \
177      (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178         CLONE_IGNORED_FLAGS))
179  
180  /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181   * have almost all been allocated. We cannot support any of
182   * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183   * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184   * The checks against the invalid thread masks above will catch these.
185   * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186   */
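/*
 * Illustrative sketch only, kept out of the build with #if 0: roughly how
 * the masks above partition a guest clone flag word. The helper name
 * classify_clone_flags() is hypothetical; do_fork() performs the real
 * checks, and special-cases CLONE_VFORK first (see the comment above).
 */
#if 0
static const char *classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
        !(flags & CLONE_INVALID_THREAD_FLAGS)) {
        return "pthread_create()-like: emulate with a new host thread";
    }
    if (!(flags & CLONE_THREAD_FLAGS) &&
        !(flags & CLONE_INVALID_FORK_FLAGS)) {
        return "fork()-like: emulate with a host fork()";
    }
    return "unsupported combination: fail with -TARGET_EINVAL";
}
#endif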
187  
188  /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189   * once. This exercises the codepaths for restart.
190   */
191  //#define DEBUG_ERESTARTSYS
192  
193  //#include <linux/msdos_fs.h>
194  #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195  #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196  
197  #undef _syscall0
198  #undef _syscall1
199  #undef _syscall2
200  #undef _syscall3
201  #undef _syscall4
202  #undef _syscall5
203  #undef _syscall6
204  
205  #define _syscall0(type,name)		\
206  static type name (void)			\
207  {					\
208  	return syscall(__NR_##name);	\
209  }
210  
211  #define _syscall1(type,name,type1,arg1)		\
212  static type name (type1 arg1)			\
213  {						\
214  	return syscall(__NR_##name, arg1);	\
215  }
216  
217  #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218  static type name (type1 arg1,type2 arg2)		\
219  {							\
220  	return syscall(__NR_##name, arg1, arg2);	\
221  }
222  
223  #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224  static type name (type1 arg1,type2 arg2,type3 arg3)		\
225  {								\
226  	return syscall(__NR_##name, arg1, arg2, arg3);		\
227  }
228  
229  #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231  {										\
232  	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233  }
234  
235  #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236  		  type5,arg5)							\
237  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238  {										\
239  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240  }
241  
242  
243  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244  		  type5,arg5,type6,arg6)					\
245  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                    type6 arg6)							\
247  {										\
248  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249  }
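/*
 * For illustration: each _syscallN use below generates a thin static wrapper
 * that invokes the raw host syscall, bypassing libc. For example the pair
 *
 *     #define __NR_sys_getcwd1 __NR_getcwd
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * (both appear further down in this file) expands to roughly:
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * i.e. a raw getcwd(2) that returns -1 and sets errno on failure, with no
 * libc-level behaviour layered on top.
 */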
250  
251  
252  #define __NR_sys_uname __NR_uname
253  #define __NR_sys_getcwd1 __NR_getcwd
254  #define __NR_sys_getdents __NR_getdents
255  #define __NR_sys_getdents64 __NR_getdents64
256  #define __NR_sys_getpriority __NR_getpriority
257  #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258  #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259  #define __NR_sys_syslog __NR_syslog
260  #if defined(__NR_futex)
261  # define __NR_sys_futex __NR_futex
262  #endif
263  #if defined(__NR_futex_time64)
264  # define __NR_sys_futex_time64 __NR_futex_time64
265  #endif
266  #define __NR_sys_inotify_init __NR_inotify_init
267  #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268  #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269  #define __NR_sys_statx __NR_statx
270  
271  #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272  #define __NR__llseek __NR_lseek
273  #endif
274  
275  /* Newer kernel ports have llseek() instead of _llseek() */
276  #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277  #define TARGET_NR__llseek TARGET_NR_llseek
278  #endif
279  
280  /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281  #ifndef TARGET_O_NONBLOCK_MASK
282  #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283  #endif
284  
285  #define __NR_sys_gettid __NR_gettid
286  _syscall0(int, sys_gettid)
287  
288  /* For the 64-bit guest on 32-bit host case we must emulate
289   * getdents using getdents64, because otherwise the host
290   * might hand us back more dirent records than we can fit
291   * into the guest buffer after structure format conversion.
292   * Otherwise we emulate the guest getdents using the host getdents, if the host has it.
293   */
294  #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295  #define EMULATE_GETDENTS_WITH_GETDENTS
296  #endif
297  
298  #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299  _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300  #endif
301  #if (defined(TARGET_NR_getdents) && \
302        !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303      (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304  _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305  #endif
306  #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307  _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308            loff_t *, res, uint, wh);
309  #endif
310  _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311  _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312            siginfo_t *, uinfo)
313  _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314  #ifdef __NR_exit_group
315  _syscall1(int,exit_group,int,error_code)
316  #endif
317  #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318  _syscall1(int,set_tid_address,int *,tidptr)
319  #endif
320  #if defined(__NR_futex)
321  _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322            const struct timespec *,timeout,int *,uaddr2,int,val3)
323  #endif
324  #if defined(__NR_futex_time64)
325  _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326            const struct timespec *,timeout,int *,uaddr2,int,val3)
327  #endif
328  #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329  _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330            unsigned long *, user_mask_ptr);
331  #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332  _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333            unsigned long *, user_mask_ptr);
334  #define __NR_sys_getcpu __NR_getcpu
335  _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336  _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337            void *, arg);
338  _syscall2(int, capget, struct __user_cap_header_struct *, header,
339            struct __user_cap_data_struct *, data);
340  _syscall2(int, capset, struct __user_cap_header_struct *, header,
341            struct __user_cap_data_struct *, data);
342  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343  _syscall2(int, ioprio_get, int, which, int, who)
344  #endif
345  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346  _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347  #endif
348  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349  _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350  #endif
351  
352  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353  _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354            unsigned long, idx1, unsigned long, idx2)
355  #endif
356  
357  /*
358   * It is assumed that struct statx is architecture independent.
359   */
360  #if defined(TARGET_NR_statx) && defined(__NR_statx)
361  _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362            unsigned int, mask, struct target_statx *, statxbuf)
363  #endif
364  #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365  _syscall2(int, membarrier, int, cmd, int, flags)
366  #endif
367  
368  static const bitmask_transtbl fcntl_flags_tbl[] = {
369    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382  #if defined(O_DIRECT)
383    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384  #endif
385  #if defined(O_NOATIME)
386    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387  #endif
388  #if defined(O_CLOEXEC)
389    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390  #endif
391  #if defined(O_PATH)
392    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393  #endif
394  #if defined(O_TMPFILE)
395    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396  #endif
397    /* Don't terminate the list prematurely on 64-bit host+guest.  */
398  #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400  #endif
401    { 0, 0, 0, 0 }
402  };
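/*
 * Sketch of how the table above is consumed (illustration only, #if 0):
 * the open()/fcntl() paths walk it with the bitmask translation helpers
 * used elsewhere in this file, so guest O_* bits become host O_* bits and
 * vice versa. The function name below is hypothetical.
 */
#if 0
static int example_translate_open_flags(int target_flags)
{
    /* e.g. TARGET_O_CREAT | TARGET_O_NONBLOCK from the guest becomes
     * O_CREAT | O_NONBLOCK for the host openat() call */
    return target_to_host_bitmask(target_flags, fcntl_flags_tbl);
}
#endif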
403  
404  _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405  
406  #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407  #if defined(__NR_utimensat)
408  #define __NR_sys_utimensat __NR_utimensat
409  _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410            const struct timespec *,tsp,int,flags)
411  #else
412  static int sys_utimensat(int dirfd, const char *pathname,
413                           const struct timespec times[2], int flags)
414  {
415      errno = ENOSYS;
416      return -1;
417  }
418  #endif
419  #endif /* TARGET_NR_utimensat */
420  
421  #ifdef TARGET_NR_renameat2
422  #if defined(__NR_renameat2)
423  #define __NR_sys_renameat2 __NR_renameat2
424  _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425            const char *, new, unsigned int, flags)
426  #else
427  static int sys_renameat2(int oldfd, const char *old,
428                           int newfd, const char *new, int flags)
429  {
430      if (flags == 0) {
431          return renameat(oldfd, old, newfd, new);
432      }
433      errno = ENOSYS;
434      return -1;
435  }
436  #endif
437  #endif /* TARGET_NR_renameat2 */
438  
439  #ifdef CONFIG_INOTIFY
440  #include <sys/inotify.h>
441  
442  #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443  static int sys_inotify_init(void)
444  {
445    return (inotify_init());
446  }
447  #endif
448  #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449  static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450  {
451    return (inotify_add_watch(fd, pathname, mask));
452  }
453  #endif
454  #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455  static int sys_inotify_rm_watch(int fd, int32_t wd)
456  {
457    return (inotify_rm_watch(fd, wd));
458  }
459  #endif
460  #ifdef CONFIG_INOTIFY1
461  #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462  static int sys_inotify_init1(int flags)
463  {
464    return (inotify_init1(flags));
465  }
466  #endif
467  #endif
468  #else
469  /* Userspace can usually survive at runtime without inotify */
470  #undef TARGET_NR_inotify_init
471  #undef TARGET_NR_inotify_init1
472  #undef TARGET_NR_inotify_add_watch
473  #undef TARGET_NR_inotify_rm_watch
474  #endif /* CONFIG_INOTIFY  */
475  
476  #if defined(TARGET_NR_prlimit64)
477  #ifndef __NR_prlimit64
478  # define __NR_prlimit64 -1
479  #endif
480  #define __NR_sys_prlimit64 __NR_prlimit64
481  /* The glibc rlimit structure may not be the one used by the underlying syscall */
482  struct host_rlimit64 {
483      uint64_t rlim_cur;
484      uint64_t rlim_max;
485  };
486  _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487            const struct host_rlimit64 *, new_limit,
488            struct host_rlimit64 *, old_limit)
489  #endif
490  
491  
492  #if defined(TARGET_NR_timer_create)
493  /* Maximum of 32 active POSIX timers allowed at any one time. */
494  static timer_t g_posix_timers[32] = { 0, };
495  
496  static inline int next_free_host_timer(void)
497  {
498      int k;
499      /* FIXME: Does finding the next free slot require a lock? */
500      for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501          if (g_posix_timers[k] == 0) {
502              g_posix_timers[k] = (timer_t) 1;
503              return k;
504          }
505      }
506      return -1;
507  }
508  #endif
509  
510  #define ERRNO_TABLE_SIZE 1200
511  
512  /* target_to_host_errno_table[] is initialized from
513   * host_to_target_errno_table[] in syscall_init(). */
514  static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515  };
516  
517  /*
518   * This list is the union of errno values overridden in asm-<arch>/errno.h
519   * minus the errnos that are not actually generic to all archs.
520   */
521  static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
522      [EAGAIN]            = TARGET_EAGAIN,
523      [EIDRM]             = TARGET_EIDRM,
524      [ECHRNG]            = TARGET_ECHRNG,
525      [EL2NSYNC]          = TARGET_EL2NSYNC,
526      [EL3HLT]            = TARGET_EL3HLT,
527      [EL3RST]            = TARGET_EL3RST,
528      [ELNRNG]            = TARGET_ELNRNG,
529      [EUNATCH]           = TARGET_EUNATCH,
530      [ENOCSI]            = TARGET_ENOCSI,
531      [EL2HLT]            = TARGET_EL2HLT,
532      [EDEADLK]           = TARGET_EDEADLK,
533      [ENOLCK]            = TARGET_ENOLCK,
534      [EBADE]             = TARGET_EBADE,
535      [EBADR]             = TARGET_EBADR,
536      [EXFULL]            = TARGET_EXFULL,
537      [ENOANO]            = TARGET_ENOANO,
538      [EBADRQC]           = TARGET_EBADRQC,
539      [EBADSLT]           = TARGET_EBADSLT,
540      [EBFONT]            = TARGET_EBFONT,
541      [ENOSTR]            = TARGET_ENOSTR,
542      [ENODATA]           = TARGET_ENODATA,
543      [ETIME]             = TARGET_ETIME,
544      [ENOSR]             = TARGET_ENOSR,
545      [ENONET]            = TARGET_ENONET,
546      [ENOPKG]            = TARGET_ENOPKG,
547      [EREMOTE]           = TARGET_EREMOTE,
548      [ENOLINK]           = TARGET_ENOLINK,
549      [EADV]              = TARGET_EADV,
550      [ESRMNT]            = TARGET_ESRMNT,
551      [ECOMM]             = TARGET_ECOMM,
552      [EPROTO]            = TARGET_EPROTO,
553      [EDOTDOT]           = TARGET_EDOTDOT,
554      [EMULTIHOP]         = TARGET_EMULTIHOP,
555      [EBADMSG]           = TARGET_EBADMSG,
556      [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
557      [EOVERFLOW]         = TARGET_EOVERFLOW,
558      [ENOTUNIQ]          = TARGET_ENOTUNIQ,
559      [EBADFD]            = TARGET_EBADFD,
560      [EREMCHG]           = TARGET_EREMCHG,
561      [ELIBACC]           = TARGET_ELIBACC,
562      [ELIBBAD]           = TARGET_ELIBBAD,
563      [ELIBSCN]           = TARGET_ELIBSCN,
564      [ELIBMAX]           = TARGET_ELIBMAX,
565      [ELIBEXEC]          = TARGET_ELIBEXEC,
566      [EILSEQ]            = TARGET_EILSEQ,
567      [ENOSYS]            = TARGET_ENOSYS,
568      [ELOOP]             = TARGET_ELOOP,
569      [ERESTART]          = TARGET_ERESTART,
570      [ESTRPIPE]          = TARGET_ESTRPIPE,
571      [ENOTEMPTY]         = TARGET_ENOTEMPTY,
572      [EUSERS]            = TARGET_EUSERS,
573      [ENOTSOCK]          = TARGET_ENOTSOCK,
574      [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
575      [EMSGSIZE]          = TARGET_EMSGSIZE,
576      [EPROTOTYPE]        = TARGET_EPROTOTYPE,
577      [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
578      [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
579      [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
580      [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
581      [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
582      [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
583      [EADDRINUSE]        = TARGET_EADDRINUSE,
584      [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
585      [ENETDOWN]          = TARGET_ENETDOWN,
586      [ENETUNREACH]       = TARGET_ENETUNREACH,
587      [ENETRESET]         = TARGET_ENETRESET,
588      [ECONNABORTED]      = TARGET_ECONNABORTED,
589      [ECONNRESET]        = TARGET_ECONNRESET,
590      [ENOBUFS]           = TARGET_ENOBUFS,
591      [EISCONN]           = TARGET_EISCONN,
592      [ENOTCONN]          = TARGET_ENOTCONN,
593      [EUCLEAN]           = TARGET_EUCLEAN,
594      [ENOTNAM]           = TARGET_ENOTNAM,
595      [ENAVAIL]           = TARGET_ENAVAIL,
596      [EISNAM]            = TARGET_EISNAM,
597      [EREMOTEIO]         = TARGET_EREMOTEIO,
598      [EDQUOT]            = TARGET_EDQUOT,
599      [ESHUTDOWN]         = TARGET_ESHUTDOWN,
600      [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
601      [ETIMEDOUT]         = TARGET_ETIMEDOUT,
602      [ECONNREFUSED]      = TARGET_ECONNREFUSED,
603      [EHOSTDOWN]         = TARGET_EHOSTDOWN,
604      [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
605      [EALREADY]          = TARGET_EALREADY,
606      [EINPROGRESS]       = TARGET_EINPROGRESS,
607      [ESTALE]            = TARGET_ESTALE,
608      [ECANCELED]         = TARGET_ECANCELED,
609      [ENOMEDIUM]         = TARGET_ENOMEDIUM,
610      [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
611  #ifdef ENOKEY
612      [ENOKEY]            = TARGET_ENOKEY,
613  #endif
614  #ifdef EKEYEXPIRED
615      [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
616  #endif
617  #ifdef EKEYREVOKED
618      [EKEYREVOKED]       = TARGET_EKEYREVOKED,
619  #endif
620  #ifdef EKEYREJECTED
621      [EKEYREJECTED]      = TARGET_EKEYREJECTED,
622  #endif
623  #ifdef EOWNERDEAD
624      [EOWNERDEAD]        = TARGET_EOWNERDEAD,
625  #endif
626  #ifdef ENOTRECOVERABLE
627      [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
628  #endif
629  #ifdef ENOMSG
630      [ENOMSG]            = TARGET_ENOMSG,
631  #endif
632  #ifdef ERFKILL
633      [ERFKILL]           = TARGET_ERFKILL,
634  #endif
635  #ifdef EHWPOISON
636      [EHWPOISON]         = TARGET_EHWPOISON,
637  #endif
638  };
639  
640  static inline int host_to_target_errno(int err)
641  {
642      if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643          host_to_target_errno_table[err]) {
644          return host_to_target_errno_table[err];
645      }
646      return err;
647  }
648  
649  static inline int target_to_host_errno(int err)
650  {
651      if (err >= 0 && err < ERRNO_TABLE_SIZE &&
652          target_to_host_errno_table[err]) {
653          return target_to_host_errno_table[err];
654      }
655      return err;
656  }
657  
658  static inline abi_long get_errno(abi_long ret)
659  {
660      if (ret == -1)
661          return -host_to_target_errno(errno);
662      else
663          return ret;
664  }
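/*
 * Usage sketch (illustration only, #if 0): syscall handlers wrap each host
 * call in get_errno() so that a failing host syscall comes back as a
 * negative *target* errno, ready to be returned to the guest as-is. The
 * function name below is hypothetical.
 */
#if 0
static abi_long example_do_close(int host_fd)
{
    /* close() returns -1/EBADF on the host; the guest sees -TARGET_EBADF */
    return get_errno(close(host_fd));
}
#endif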
665  
666  const char *target_strerror(int err)
667  {
668      if (err == TARGET_ERESTARTSYS) {
669          return "To be restarted";
670      }
671      if (err == TARGET_QEMU_ESIGRETURN) {
672          return "Successful exit from sigreturn";
673      }
674  
675      if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
676          return NULL;
677      }
678      return strerror(target_to_host_errno(err));
679  }
680  
681  #define safe_syscall0(type, name) \
682  static type safe_##name(void) \
683  { \
684      return safe_syscall(__NR_##name); \
685  }
686  
687  #define safe_syscall1(type, name, type1, arg1) \
688  static type safe_##name(type1 arg1) \
689  { \
690      return safe_syscall(__NR_##name, arg1); \
691  }
692  
693  #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
694  static type safe_##name(type1 arg1, type2 arg2) \
695  { \
696      return safe_syscall(__NR_##name, arg1, arg2); \
697  }
698  
699  #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
700  static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
701  { \
702      return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703  }
704  
705  #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
706      type4, arg4) \
707  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
708  { \
709      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710  }
711  
712  #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
713      type4, arg4, type5, arg5) \
714  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
715      type5 arg5) \
716  { \
717      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718  }
719  
720  #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
721      type4, arg4, type5, arg5, type6, arg6) \
722  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
723      type5 arg5, type6 arg6) \
724  { \
725      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
726  }
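/*
 * The safe_* wrappers below route blocking host syscalls through
 * safe_syscall() instead of libc, so that a guest signal arriving around
 * the call interrupts it and lets the syscall be restarted rather than
 * blocking through the signal; see the safe_syscall() definition for the
 * exact contract. Usage sketch (illustration only, #if 0; the function
 * name is hypothetical):
 */
#if 0
static abi_long example_do_read(int fd, void *host_buf, abi_ulong count)
{
    /* returns the byte count, or a negative target errno on failure */
    return get_errno(safe_read(fd, host_buf, count));
}
#endif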
727  
728  safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
729  safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
730  safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
731                int, flags, mode_t, mode)
732  #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
733  safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
734                struct rusage *, rusage)
735  #endif
736  safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
737                int, options, struct rusage *, rusage)
738  safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
739  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
740      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
741  safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
742                fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
743  #endif
744  #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
745  safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
746                struct timespec *, tsp, const sigset_t *, sigmask,
747                size_t, sigsetsize)
748  #endif
749  safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
750                int, maxevents, int, timeout, const sigset_t *, sigmask,
751                size_t, sigsetsize)
752  #if defined(__NR_futex)
753  safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
754                const struct timespec *,timeout,int *,uaddr2,int,val3)
755  #endif
756  #if defined(__NR_futex_time64)
757  safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
758                const struct timespec *,timeout,int *,uaddr2,int,val3)
759  #endif
760  safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
761  safe_syscall2(int, kill, pid_t, pid, int, sig)
762  safe_syscall2(int, tkill, int, tid, int, sig)
763  safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
764  safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
765  safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
766  safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
767                unsigned long, pos_l, unsigned long, pos_h)
768  safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
769                unsigned long, pos_l, unsigned long, pos_h)
770  safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
771                socklen_t, addrlen)
772  safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
773                int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
774  safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
775                int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
776  safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
777  safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
778  safe_syscall2(int, flock, int, fd, int, operation)
779  #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
780  safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
781                const struct timespec *, uts, size_t, sigsetsize)
782  #endif
783  safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
784                int, flags)
785  #if defined(TARGET_NR_nanosleep)
786  safe_syscall2(int, nanosleep, const struct timespec *, req,
787                struct timespec *, rem)
788  #endif
789  #if defined(TARGET_NR_clock_nanosleep) || \
790      defined(TARGET_NR_clock_nanosleep_time64)
791  safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
792                const struct timespec *, req, struct timespec *, rem)
793  #endif
794  #ifdef __NR_ipc
795  #ifdef __s390x__
796  safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
797                void *, ptr)
798  #else
799  safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
800                void *, ptr, long, fifth)
801  #endif
802  #endif
803  #ifdef __NR_msgsnd
804  safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
805                int, flags)
806  #endif
807  #ifdef __NR_msgrcv
808  safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
809                long, msgtype, int, flags)
810  #endif
811  #ifdef __NR_semtimedop
812  safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
813                unsigned, nsops, const struct timespec *, timeout)
814  #endif
815  #if defined(TARGET_NR_mq_timedsend) || \
816      defined(TARGET_NR_mq_timedsend_time64)
817  safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
818                size_t, len, unsigned, prio, const struct timespec *, timeout)
819  #endif
820  #if defined(TARGET_NR_mq_timedreceive) || \
821      defined(TARGET_NR_mq_timedreceive_time64)
822  safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
823                size_t, len, unsigned *, prio, const struct timespec *, timeout)
824  #endif
825  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
826  safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
827                int, outfd, loff_t *, poutoff, size_t, length,
828                unsigned int, flags)
829  #endif
830  
831  /* We do ioctl like this rather than via safe_syscall3 to preserve the
832   * "third argument might be integer or pointer or not present" behaviour of
833   * the libc function.
834   */
835  #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
836  /* Similarly for fcntl. Note that callers must always:
837   *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
838   *  use the flock64 struct rather than unsuffixed flock
839   * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
840   */
841  #ifdef __NR_fcntl64
842  #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
843  #else
844  #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
845  #endif
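/*
 * Sketch of the calling convention described above (illustration only,
 * #if 0): always the 64-bit command constants and struct flock64, so the
 * same code works whether safe_fcntl() maps to fcntl64 or fcntl. The
 * function name below is hypothetical.
 */
#if 0
static abi_long example_probe_lock(int fd)
{
    struct flock64 fl = {
        .l_type = F_RDLCK,
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,             /* 0 length means "to end of file" */
    };
    return get_errno(safe_fcntl(fd, F_GETLK64, &fl));
}
#endif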
846  
847  static inline int host_to_target_sock_type(int host_type)
848  {
849      int target_type;
850  
851      switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
852      case SOCK_DGRAM:
853          target_type = TARGET_SOCK_DGRAM;
854          break;
855      case SOCK_STREAM:
856          target_type = TARGET_SOCK_STREAM;
857          break;
858      default:
859          target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
860          break;
861      }
862  
863  #if defined(SOCK_CLOEXEC)
864      if (host_type & SOCK_CLOEXEC) {
865          target_type |= TARGET_SOCK_CLOEXEC;
866      }
867  #endif
868  
869  #if defined(SOCK_NONBLOCK)
870      if (host_type & SOCK_NONBLOCK) {
871          target_type |= TARGET_SOCK_NONBLOCK;
872      }
873  #endif
874  
875      return target_type;
876  }
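/*
 * For illustration: a host socket type of SOCK_STREAM | SOCK_CLOEXEC maps
 * to TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC for the guest; base types
 * not handled by the switch are passed through in the low nibble unchanged.
 */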
877  
878  static abi_ulong target_brk;
879  static abi_ulong target_original_brk;
880  static abi_ulong brk_page;
881  
882  void target_set_brk(abi_ulong new_brk)
883  {
884      target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
885      brk_page = HOST_PAGE_ALIGN(target_brk);
886  }
887  
888  //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
889  #define DEBUGF_BRK(message, args...)
890  
891  /* do_brk() must return target values and target errnos. */
892  abi_long do_brk(abi_ulong new_brk)
893  {
894      abi_long mapped_addr;
895      abi_ulong new_alloc_size;
896  
897      /* brk pointers are always untagged */
898  
899      DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
900  
901      if (!new_brk) {
902          DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
903          return target_brk;
904      }
905      if (new_brk < target_original_brk) {
906          DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
907                     target_brk);
908          return target_brk;
909      }
910  
911      /* If the new brk is less than the highest page reserved to the
912       * target heap allocation, set it and we're almost done...  */
913      if (new_brk <= brk_page) {
914          /* Heap contents are initialized to zero, as for anonymous
915           * mapped pages.  */
916          if (new_brk > target_brk) {
917              memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
918          }
919          target_brk = new_brk;
920          DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
921          return target_brk;
922      }
923  
924      /* We need to allocate more memory after the brk... Note that
925       * we don't use MAP_FIXED because that will map over the top of
926       * any existing mapping (like the one with the host libc or qemu
927       * itself); instead we treat "mapped but at wrong address" as
928       * a failure and unmap again.
929       */
930      new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
931      mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
932                                          PROT_READ|PROT_WRITE,
933                                          MAP_ANON|MAP_PRIVATE, 0, 0));
934  
935      if (mapped_addr == brk_page) {
936          /* Heap contents are initialized to zero, as for anonymous
937           * mapped pages.  Technically the new pages are already
938           * initialized to zero since they *are* anonymous mapped
939           * pages, however we have to take care with the contents that
940           * come from the remaining part of the previous page: it may
941       * contain garbage data from previous heap usage (grown
942           * then shrunken).  */
943          memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
944  
945          target_brk = new_brk;
946          brk_page = HOST_PAGE_ALIGN(target_brk);
947          DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
948              target_brk);
949          return target_brk;
950      } else if (mapped_addr != -1) {
951          /* Mapped but at wrong address, meaning there wasn't actually
952           * enough space for this brk.
953           */
954          target_munmap(mapped_addr, new_alloc_size);
955          mapped_addr = -1;
956          DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
957      } else {
959          DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
960      }
961  
962  #if defined(TARGET_ALPHA)
963      /* We (partially) emulate OSF/1 on Alpha, which requires we
964         return a proper errno, not an unchanged brk value.  */
965      return -TARGET_ENOMEM;
966  #endif
967      /* For everything else, return the previous break. */
968      return target_brk;
969  }
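/*
 * Guest-visible behaviour sketch (illustration only, #if 0): brk(0) queries
 * the current break, and growing it either zero-fills space still inside
 * the reserved brk_page or maps fresh anonymous memory right after it.
 * The function name and sizes below are made up for the example.
 */
#if 0
static void example_grow_guest_heap(void)
{
    abi_ulong cur = do_brk(0);            /* query: returns target_brk */
    abi_ulong wanted = cur + 0x10000;     /* ask for 64 KiB more */

    if (do_brk(wanted) != wanted) {
        /* could not grow: the old break is returned unchanged
         * (or -TARGET_ENOMEM on Alpha, which emulates OSF/1) */
    }
}
#endif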
970  
971  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
972      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
973  static inline abi_long copy_from_user_fdset(fd_set *fds,
974                                              abi_ulong target_fds_addr,
975                                              int n)
976  {
977      int i, nw, j, k;
978      abi_ulong b, *target_fds;
979  
980      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
981      if (!(target_fds = lock_user(VERIFY_READ,
982                                   target_fds_addr,
983                                   sizeof(abi_ulong) * nw,
984                                   1)))
985          return -TARGET_EFAULT;
986  
987      FD_ZERO(fds);
988      k = 0;
989      for (i = 0; i < nw; i++) {
990          /* grab the abi_ulong */
991          __get_user(b, &target_fds[i]);
992          for (j = 0; j < TARGET_ABI_BITS; j++) {
993              /* check the bit inside the abi_ulong */
994              if ((b >> j) & 1)
995                  FD_SET(k, fds);
996              k++;
997          }
998      }
999  
1000      unlock_user(target_fds, target_fds_addr, 0);
1001  
1002      return 0;
1003  }
1004  
1005  static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1006                                                   abi_ulong target_fds_addr,
1007                                                   int n)
1008  {
1009      if (target_fds_addr) {
1010          if (copy_from_user_fdset(fds, target_fds_addr, n))
1011              return -TARGET_EFAULT;
1012          *fds_ptr = fds;
1013      } else {
1014          *fds_ptr = NULL;
1015      }
1016      return 0;
1017  }
1018  
1019  static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1020                                            const fd_set *fds,
1021                                            int n)
1022  {
1023      int i, nw, j, k;
1024      abi_long v;
1025      abi_ulong *target_fds;
1026  
1027      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1028      if (!(target_fds = lock_user(VERIFY_WRITE,
1029                                   target_fds_addr,
1030                                   sizeof(abi_ulong) * nw,
1031                                   0)))
1032          return -TARGET_EFAULT;
1033  
1034      k = 0;
1035      for (i = 0; i < nw; i++) {
1036          v = 0;
1037          for (j = 0; j < TARGET_ABI_BITS; j++) {
1038              v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1039              k++;
1040          }
1041          __put_user(v, &target_fds[i]);
1042      }
1043  
1044      unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1045  
1046      return 0;
1047  }
1048  #endif
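/*
 * Worked example for the fd_set conversion above: with TARGET_ABI_BITS == 32
 * and n == 70, nw = DIV_ROUND_UP(70, 32) = 3 abi_ulong words are copied, and
 * bit j of word i corresponds to descriptor k = i * 32 + j, so guest fd 33
 * lives in bit 1 of word 1. The same packing is used in both directions.
 */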
1049  
1050  #if defined(__alpha__)
1051  #define HOST_HZ 1024
1052  #else
1053  #define HOST_HZ 100
1054  #endif
1055  
1056  static inline abi_long host_to_target_clock_t(long ticks)
1057  {
1058  #if HOST_HZ == TARGET_HZ
1059      return ticks;
1060  #else
1061      return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1062  #endif
1063  }
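/*
 * Worked example: with HOST_HZ == 100 and a guest whose TARGET_HZ is 1024,
 * 250 host ticks become (250 * 1024) / 100 = 2560 guest clock ticks; when
 * the two rates match, the value passes through unchanged.
 */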
1064  
1065  static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1066                                               const struct rusage *rusage)
1067  {
1068      struct target_rusage *target_rusage;
1069  
1070      if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1071          return -TARGET_EFAULT;
1072      target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1073      target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1074      target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1075      target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1076      target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1077      target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1078      target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1079      target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1080      target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1081      target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1082      target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1083      target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1084      target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1085      target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1086      target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1087      target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1088      target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1089      target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1090      unlock_user_struct(target_rusage, target_addr, 1);
1091  
1092      return 0;
1093  }
1094  
1095  #ifdef TARGET_NR_setrlimit
1096  static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1097  {
1098      abi_ulong target_rlim_swap;
1099      rlim_t result;
1100  
1101      target_rlim_swap = tswapal(target_rlim);
1102      if (target_rlim_swap == TARGET_RLIM_INFINITY)
1103          return RLIM_INFINITY;
1104  
1105      result = target_rlim_swap;
1106      if (target_rlim_swap != (rlim_t)result)
1107          return RLIM_INFINITY;
1108  
1109      return result;
1110  }
1111  #endif
1112  
1113  #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1114  static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1115  {
1116      abi_ulong target_rlim_swap;
1117      abi_ulong result;
1118  
1119      if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1120          target_rlim_swap = TARGET_RLIM_INFINITY;
1121      else
1122          target_rlim_swap = rlim;
1123      result = tswapal(target_rlim_swap);
1124  
1125      return result;
1126  }
1127  #endif
1128  
1129  static inline int target_to_host_resource(int code)
1130  {
1131      switch (code) {
1132      case TARGET_RLIMIT_AS:
1133          return RLIMIT_AS;
1134      case TARGET_RLIMIT_CORE:
1135          return RLIMIT_CORE;
1136      case TARGET_RLIMIT_CPU:
1137          return RLIMIT_CPU;
1138      case TARGET_RLIMIT_DATA:
1139          return RLIMIT_DATA;
1140      case TARGET_RLIMIT_FSIZE:
1141          return RLIMIT_FSIZE;
1142      case TARGET_RLIMIT_LOCKS:
1143          return RLIMIT_LOCKS;
1144      case TARGET_RLIMIT_MEMLOCK:
1145          return RLIMIT_MEMLOCK;
1146      case TARGET_RLIMIT_MSGQUEUE:
1147          return RLIMIT_MSGQUEUE;
1148      case TARGET_RLIMIT_NICE:
1149          return RLIMIT_NICE;
1150      case TARGET_RLIMIT_NOFILE:
1151          return RLIMIT_NOFILE;
1152      case TARGET_RLIMIT_NPROC:
1153          return RLIMIT_NPROC;
1154      case TARGET_RLIMIT_RSS:
1155          return RLIMIT_RSS;
1156      case TARGET_RLIMIT_RTPRIO:
1157          return RLIMIT_RTPRIO;
1158      case TARGET_RLIMIT_SIGPENDING:
1159          return RLIMIT_SIGPENDING;
1160      case TARGET_RLIMIT_STACK:
1161          return RLIMIT_STACK;
1162      default:
1163          return code;
1164      }
1165  }
1166  
1167  static inline abi_long copy_from_user_timeval(struct timeval *tv,
1168                                                abi_ulong target_tv_addr)
1169  {
1170      struct target_timeval *target_tv;
1171  
1172      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1173          return -TARGET_EFAULT;
1174      }
1175  
1176      __get_user(tv->tv_sec, &target_tv->tv_sec);
1177      __get_user(tv->tv_usec, &target_tv->tv_usec);
1178  
1179      unlock_user_struct(target_tv, target_tv_addr, 0);
1180  
1181      return 0;
1182  }
1183  
1184  static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1185                                              const struct timeval *tv)
1186  {
1187      struct target_timeval *target_tv;
1188  
1189      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1190          return -TARGET_EFAULT;
1191      }
1192  
1193      __put_user(tv->tv_sec, &target_tv->tv_sec);
1194      __put_user(tv->tv_usec, &target_tv->tv_usec);
1195  
1196      unlock_user_struct(target_tv, target_tv_addr, 1);
1197  
1198      return 0;
1199  }
1200  
1201  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1202  static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1203                                                  abi_ulong target_tv_addr)
1204  {
1205      struct target__kernel_sock_timeval *target_tv;
1206  
1207      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1208          return -TARGET_EFAULT;
1209      }
1210  
1211      __get_user(tv->tv_sec, &target_tv->tv_sec);
1212      __get_user(tv->tv_usec, &target_tv->tv_usec);
1213  
1214      unlock_user_struct(target_tv, target_tv_addr, 0);
1215  
1216      return 0;
1217  }
1218  #endif
1219  
1220  static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1221                                                const struct timeval *tv)
1222  {
1223      struct target__kernel_sock_timeval *target_tv;
1224  
1225      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1226          return -TARGET_EFAULT;
1227      }
1228  
1229      __put_user(tv->tv_sec, &target_tv->tv_sec);
1230      __put_user(tv->tv_usec, &target_tv->tv_usec);
1231  
1232      unlock_user_struct(target_tv, target_tv_addr, 1);
1233  
1234      return 0;
1235  }
1236  
1237  #if defined(TARGET_NR_futex) || \
1238      defined(TARGET_NR_rt_sigtimedwait) || \
1239      defined(TARGET_NR_pselect6) || \
1240      defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1241      defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1242      defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1243      defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1244      defined(TARGET_NR_timer_settime) || \
1245      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1246  static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1247                                                 abi_ulong target_addr)
1248  {
1249      struct target_timespec *target_ts;
1250  
1251      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1252          return -TARGET_EFAULT;
1253      }
1254      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1255      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1256      unlock_user_struct(target_ts, target_addr, 0);
1257      return 0;
1258  }
1259  #endif
1260  
1261  #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1262      defined(TARGET_NR_timer_settime64) || \
1263      defined(TARGET_NR_mq_timedsend_time64) || \
1264      defined(TARGET_NR_mq_timedreceive_time64) || \
1265      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1266      defined(TARGET_NR_clock_nanosleep_time64) || \
1267      defined(TARGET_NR_rt_sigtimedwait_time64) || \
1268      defined(TARGET_NR_utimensat) || \
1269      defined(TARGET_NR_utimensat_time64) || \
1270      defined(TARGET_NR_semtimedop_time64) || \
1271      defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1272  static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1273                                                   abi_ulong target_addr)
1274  {
1275      struct target__kernel_timespec *target_ts;
1276  
1277      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1278          return -TARGET_EFAULT;
1279      }
1280      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1281      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1282      /* in 32bit mode, this drops the padding */
1283      host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1284      unlock_user_struct(target_ts, target_addr, 0);
1285      return 0;
1286  }
1287  #endif
1288  
1289  static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1290                                                 struct timespec *host_ts)
1291  {
1292      struct target_timespec *target_ts;
1293  
1294      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1295          return -TARGET_EFAULT;
1296      }
1297      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1298      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1299      unlock_user_struct(target_ts, target_addr, 1);
1300      return 0;
1301  }
1302  
1303  static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1304                                                   struct timespec *host_ts)
1305  {
1306      struct target__kernel_timespec *target_ts;
1307  
1308      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1309          return -TARGET_EFAULT;
1310      }
1311      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1312      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1313      unlock_user_struct(target_ts, target_addr, 1);
1314      return 0;
1315  }
1316  
1317  #if defined(TARGET_NR_gettimeofday)
1318  static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1319                                               struct timezone *tz)
1320  {
1321      struct target_timezone *target_tz;
1322  
1323      if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1324          return -TARGET_EFAULT;
1325      }
1326  
1327      __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1328      __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1329  
1330      unlock_user_struct(target_tz, target_tz_addr, 1);
1331  
1332      return 0;
1333  }
1334  #endif
1335  
1336  #if defined(TARGET_NR_settimeofday)
1337  static inline abi_long copy_from_user_timezone(struct timezone *tz,
1338                                                 abi_ulong target_tz_addr)
1339  {
1340      struct target_timezone *target_tz;
1341  
1342      if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1343          return -TARGET_EFAULT;
1344      }
1345  
1346      __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1347      __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1348  
1349      unlock_user_struct(target_tz, target_tz_addr, 0);
1350  
1351      return 0;
1352  }
1353  #endif
1354  
1355  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1356  #include <mqueue.h>
1357  
1358  static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1359                                                abi_ulong target_mq_attr_addr)
1360  {
1361      struct target_mq_attr *target_mq_attr;
1362  
1363      if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1364                            target_mq_attr_addr, 1))
1365          return -TARGET_EFAULT;
1366  
1367      __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1368      __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1369      __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1370      __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371  
1372      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1373  
1374      return 0;
1375  }
1376  
1377  static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1378                                              const struct mq_attr *attr)
1379  {
1380      struct target_mq_attr *target_mq_attr;
1381  
1382      if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1383                            target_mq_attr_addr, 0))
1384          return -TARGET_EFAULT;
1385  
1386      __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1387      __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1388      __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1389      __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1390  
1391      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1392  
1393      return 0;
1394  }
1395  #endif
1396  
1397  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1398  /* do_select() must return target values and target errnos. */
1399  static abi_long do_select(int n,
1400                            abi_ulong rfd_addr, abi_ulong wfd_addr,
1401                            abi_ulong efd_addr, abi_ulong target_tv_addr)
1402  {
1403      fd_set rfds, wfds, efds;
1404      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1405      struct timeval tv;
1406      struct timespec ts, *ts_ptr;
1407      abi_long ret;
1408  
1409      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410      if (ret) {
1411          return ret;
1412      }
1413      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414      if (ret) {
1415          return ret;
1416      }
1417      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418      if (ret) {
1419          return ret;
1420      }
1421  
1422      if (target_tv_addr) {
1423          if (copy_from_user_timeval(&tv, target_tv_addr))
1424              return -TARGET_EFAULT;
1425          ts.tv_sec = tv.tv_sec;
1426          ts.tv_nsec = tv.tv_usec * 1000;
1427          ts_ptr = &ts;
1428      } else {
1429          ts_ptr = NULL;
1430      }
1431  
1432      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1433                                    ts_ptr, NULL));
1434  
1435      if (!is_error(ret)) {
1436          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1437              return -TARGET_EFAULT;
1438          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1439              return -TARGET_EFAULT;
1440          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1441              return -TARGET_EFAULT;
1442  
1443          if (target_tv_addr) {
1444              tv.tv_sec = ts.tv_sec;
1445              tv.tv_usec = ts.tv_nsec / 1000;
1446              if (copy_to_user_timeval(target_tv_addr, &tv)) {
1447                  return -TARGET_EFAULT;
1448              }
1449          }
1450      }
1451  
1452      return ret;
1453  }
1454  
1455  #if defined(TARGET_WANT_OLD_SYS_SELECT)
1456  static abi_long do_old_select(abi_ulong arg1)
1457  {
1458      struct target_sel_arg_struct *sel;
1459      abi_ulong inp, outp, exp, tvp;
1460      long nsel;
1461  
1462      if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1463          return -TARGET_EFAULT;
1464      }
1465  
1466      nsel = tswapal(sel->n);
1467      inp = tswapal(sel->inp);
1468      outp = tswapal(sel->outp);
1469      exp = tswapal(sel->exp);
1470      tvp = tswapal(sel->tvp);
1471  
1472      unlock_user_struct(sel, arg1, 0);
1473  
1474      return do_select(nsel, inp, outp, exp, tvp);
1475  }
1476  #endif
1477  #endif
1478  
1479  #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
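      /*
       * do_pselect6() must return target values and target errnos. It is
       * like do_select() except that the timeout is a timespec and the
       * sixth argument packs a {sigset pointer, sigset size} pair; 'time64'
       * selects between the 32-bit and 64-bit guest timespec layouts.
       */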
1480  static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1481                              abi_long arg4, abi_long arg5, abi_long arg6,
1482                              bool time64)
1483  {
1484      abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1485      fd_set rfds, wfds, efds;
1486      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1487      struct timespec ts, *ts_ptr;
1488      abi_long ret;
1489  
1490      /*
1491       * The 6th arg is actually two args smashed together,
1492       * so we cannot use the C library.
1493       */
1494      sigset_t set;
1495      struct {
1496          sigset_t *set;
1497          size_t size;
1498      } sig, *sig_ptr;
1499  
1500      abi_ulong arg_sigset, arg_sigsize, *arg7;
1501      target_sigset_t *target_sigset;
1502  
1503      n = arg1;
1504      rfd_addr = arg2;
1505      wfd_addr = arg3;
1506      efd_addr = arg4;
1507      ts_addr = arg5;
1508  
1509      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1510      if (ret) {
1511          return ret;
1512      }
1513      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1514      if (ret) {
1515          return ret;
1516      }
1517      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1518      if (ret) {
1519          return ret;
1520      }
1521  
1522      /*
1523       * This takes a timespec, and not a timeval, so we cannot
1524       * use the do_select() helper ...
1525       */
1526      if (ts_addr) {
1527          if (time64) {
1528              if (target_to_host_timespec64(&ts, ts_addr)) {
1529                  return -TARGET_EFAULT;
1530              }
1531          } else {
1532              if (target_to_host_timespec(&ts, ts_addr)) {
1533                  return -TARGET_EFAULT;
1534              }
1535          }
1536          ts_ptr = &ts;
1537      } else {
1538          ts_ptr = NULL;
1539      }
1540  
1541      /* Extract the two packed args for the sigset */
1542      if (arg6) {
1543          sig_ptr = &sig;
1544          sig.size = SIGSET_T_SIZE;
1545  
1546          arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1547          if (!arg7) {
1548              return -TARGET_EFAULT;
1549          }
1550          arg_sigset = tswapal(arg7[0]);
1551          arg_sigsize = tswapal(arg7[1]);
1552          unlock_user(arg7, arg6, 0);
1553  
1554          if (arg_sigset) {
1555              sig.set = &set;
1556              if (arg_sigsize != sizeof(*target_sigset)) {
1557                  /* Like the kernel, we enforce correct size sigsets */
1558                  return -TARGET_EINVAL;
1559              }
1560              target_sigset = lock_user(VERIFY_READ, arg_sigset,
1561                                        sizeof(*target_sigset), 1);
1562              if (!target_sigset) {
1563                  return -TARGET_EFAULT;
1564              }
1565              target_to_host_sigset(&set, target_sigset);
1566              unlock_user(target_sigset, arg_sigset, 0);
1567          } else {
1568              sig.set = NULL;
1569          }
1570      } else {
1571          sig_ptr = NULL;
1572      }
1573  
1574      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1575                                    ts_ptr, sig_ptr));
1576  
1577      if (!is_error(ret)) {
1578          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1579              return -TARGET_EFAULT;
1580          }
1581          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1582              return -TARGET_EFAULT;
1583          }
1584          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1585              return -TARGET_EFAULT;
1586          }
1587          if (time64) {
1588              if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1589                  return -TARGET_EFAULT;
1590              }
1591          } else {
1592              if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1593                  return -TARGET_EFAULT;
1594              }
1595          }
1596      }
1597      return ret;
1598  }
1599  #endif
1600  
1601  #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1602      defined(TARGET_NR_ppoll_time64)
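      /*
       * Common handler for poll(), ppoll() and ppoll_time64(). 'ppoll'
       * selects between the plain millisecond timeout of poll() and the
       * timespec/sigmask form of ppoll(); 'time64' selects the guest
       * timespec layout.
       */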
1603  static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1604                           abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1605  {
1606      struct target_pollfd *target_pfd;
1607      unsigned int nfds = arg2;
1608      struct pollfd *pfd;
1609      unsigned int i;
1610      abi_long ret;
1611  
1612      pfd = NULL;
1613      target_pfd = NULL;
1614      if (nfds) {
1615          if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1616              return -TARGET_EINVAL;
1617          }
1618          target_pfd = lock_user(VERIFY_WRITE, arg1,
1619                                 sizeof(struct target_pollfd) * nfds, 1);
1620          if (!target_pfd) {
1621              return -TARGET_EFAULT;
1622          }
1623  
1624          pfd = alloca(sizeof(struct pollfd) * nfds);
1625          for (i = 0; i < nfds; i++) {
1626              pfd[i].fd = tswap32(target_pfd[i].fd);
1627              pfd[i].events = tswap16(target_pfd[i].events);
1628          }
1629      }
1630      if (ppoll) {
1631          struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1632          target_sigset_t *target_set;
1633          sigset_t _set, *set = &_set;
1634  
1635          if (arg3) {
1636              if (time64) {
1637                  if (target_to_host_timespec64(timeout_ts, arg3)) {
1638                      unlock_user(target_pfd, arg1, 0);
1639                      return -TARGET_EFAULT;
1640                  }
1641              } else {
1642                  if (target_to_host_timespec(timeout_ts, arg3)) {
1643                      unlock_user(target_pfd, arg1, 0);
1644                      return -TARGET_EFAULT;
1645                  }
1646              }
1647          } else {
1648              timeout_ts = NULL;
1649          }
1650  
1651          if (arg4) {
1652              if (arg5 != sizeof(target_sigset_t)) {
1653                  unlock_user(target_pfd, arg1, 0);
1654                  return -TARGET_EINVAL;
1655              }
1656  
1657              target_set = lock_user(VERIFY_READ, arg4,
1658                                     sizeof(target_sigset_t), 1);
1659              if (!target_set) {
1660                  unlock_user(target_pfd, arg1, 0);
1661                  return -TARGET_EFAULT;
1662              }
1663              target_to_host_sigset(set, target_set);
1664          } else {
1665              set = NULL;
1666          }
1667  
1668          ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1669                                     set, SIGSET_T_SIZE));
1670  
1671          if (!is_error(ret) && arg3) {
1672              if (time64) {
1673                  if (host_to_target_timespec64(arg3, timeout_ts)) {
1674                      return -TARGET_EFAULT;
1675                  }
1676              } else {
1677                  if (host_to_target_timespec(arg3, timeout_ts)) {
1678                      return -TARGET_EFAULT;
1679                  }
1680              }
1681          }
1682          if (arg4) {
1683              unlock_user(target_set, arg4, 0);
1684          }
1685      } else {
1686          struct timespec ts, *pts;
1687  
1688          if (arg3 >= 0) {
1689              /* Convert ms to secs, ns */
1690              ts.tv_sec = arg3 / 1000;
1691              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1692              pts = &ts;
1693          } else {
1694              /* -ve poll() timeout means "infinite" */
1695              pts = NULL;
1696          }
1697          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1698      }
1699  
1700      if (!is_error(ret)) {
1701          for (i = 0; i < nfds; i++) {
1702              target_pfd[i].revents = tswap16(pfd[i].revents);
1703          }
1704      }
1705      unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1706      return ret;
1707  }
1708  #endif
1709  
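      /* Thin wrapper: report ENOSYS when the host has no pipe2(). */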
1710  static abi_long do_pipe2(int host_pipe[], int flags)
1711  {
1712  #ifdef CONFIG_PIPE2
1713      return pipe2(host_pipe, flags);
1714  #else
1715      return -ENOSYS;
1716  #endif
1717  }
1718  
1719  static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1720                          int flags, int is_pipe2)
1721  {
1722      int host_pipe[2];
1723      abi_long ret;
1724      ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1725  
1726      if (is_error(ret))
1727          return get_errno(ret);
1728  
1729      /* Several targets have special calling conventions for the original
1730         pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1731      if (!is_pipe2) {
1732  #if defined(TARGET_ALPHA)
1733          ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1734          return host_pipe[0];
1735  #elif defined(TARGET_MIPS)
1736          ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1737          return host_pipe[0];
1738  #elif defined(TARGET_SH4)
1739          ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1740          return host_pipe[0];
1741  #elif defined(TARGET_SPARC)
1742          ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1743          return host_pipe[0];
1744  #endif
1745      }
1746  
1747      if (put_user_s32(host_pipe[0], pipedes)
1748          || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1749          return -TARGET_EFAULT;
1750      return get_errno(ret);
1751  }
1752  
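      /*
       * Convert a guest ip_mreq/ip_mreqn (IP_ADD_MEMBERSHIP and friends).
       * The addresses are already in network byte order; only the optional
       * ifindex field needs swapping.
       */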
1753  static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1754                                                abi_ulong target_addr,
1755                                                socklen_t len)
1756  {
1757      struct target_ip_mreqn *target_smreqn;
1758  
1759      target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1760      if (!target_smreqn)
1761          return -TARGET_EFAULT;
1762      mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1763      mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1764      if (len == sizeof(struct target_ip_mreqn))
1765          mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1766      unlock_user(target_smreqn, target_addr, 0);
1767  
1768      return 0;
1769  }
1770  
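      /*
       * Copy a sockaddr from guest memory, fixing up the family field and
       * the family-specific quirks: AF_UNIX sun_path termination and the
       * byte order of the AF_NETLINK and AF_PACKET members.
       */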
1771  static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1772                                                 abi_ulong target_addr,
1773                                                 socklen_t len)
1774  {
1775      const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1776      sa_family_t sa_family;
1777      struct target_sockaddr *target_saddr;
1778  
1779      if (fd_trans_target_to_host_addr(fd)) {
1780          return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1781      }
1782  
1783      target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1784      if (!target_saddr)
1785          return -TARGET_EFAULT;
1786  
1787      sa_family = tswap16(target_saddr->sa_family);
1788  
1789      /* Oops. The caller might send an incomplete sun_path; sun_path
1790       * must be terminated by \0 (see the manual page), but
1791       * unfortunately it is quite common to specify sockaddr_un
1792       * length as "strlen(x->sun_path)" when it should be
1793       * "strlen(...) + 1". We'll fix that here if needed.
1794       * The Linux kernel applies the same fixup.
1795       */
1796  
1797      if (sa_family == AF_UNIX) {
1798          if (len < unix_maxlen && len > 0) {
1799              char *cp = (char *)target_saddr;
1800  
1801              if (cp[len - 1] && !cp[len])
1802                  len++;
1803          }
1804          if (len > unix_maxlen)
1805              len = unix_maxlen;
1806      }
1807  
1808      memcpy(addr, target_saddr, len);
1809      addr->sa_family = sa_family;
1810      if (sa_family == AF_NETLINK) {
1811          struct sockaddr_nl *nladdr;
1812  
1813          nladdr = (struct sockaddr_nl *)addr;
1814          nladdr->nl_pid = tswap32(nladdr->nl_pid);
1815          nladdr->nl_groups = tswap32(nladdr->nl_groups);
1816      } else if (sa_family == AF_PACKET) {
1817          struct target_sockaddr_ll *lladdr;
1818  
1819          lladdr = (struct target_sockaddr_ll *)addr;
1820          lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1821          lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1822      }
1823      unlock_user(target_saddr, target_addr, 0);
1824  
1825      return 0;
1826  }
1827  
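      /*
       * Copy a host sockaddr back to guest memory, byte-swapping the family
       * and the AF_NETLINK, AF_PACKET and AF_INET6 members that need it.
       */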
1828  static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1829                                                 struct sockaddr *addr,
1830                                                 socklen_t len)
1831  {
1832      struct target_sockaddr *target_saddr;
1833  
1834      if (len == 0) {
1835          return 0;
1836      }
1837      assert(addr);
1838  
1839      target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1840      if (!target_saddr)
1841          return -TARGET_EFAULT;
1842      memcpy(target_saddr, addr, len);
1843      if (len >= offsetof(struct target_sockaddr, sa_family) +
1844          sizeof(target_saddr->sa_family)) {
1845          target_saddr->sa_family = tswap16(addr->sa_family);
1846      }
1847      if (addr->sa_family == AF_NETLINK &&
1848          len >= sizeof(struct target_sockaddr_nl)) {
1849          struct target_sockaddr_nl *target_nl =
1850                 (struct target_sockaddr_nl *)target_saddr;
1851          target_nl->nl_pid = tswap32(target_nl->nl_pid);
1852          target_nl->nl_groups = tswap32(target_nl->nl_groups);
1853      } else if (addr->sa_family == AF_PACKET) {
1854          struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1855          target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1856          target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1857      } else if (addr->sa_family == AF_INET6 &&
1858                 len >= sizeof(struct target_sockaddr_in6)) {
1859          struct target_sockaddr_in6 *target_in6 =
1860                 (struct target_sockaddr_in6 *)target_saddr;
1861          target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1862      }
1863      unlock_user(target_saddr, target_addr, len);
1864  
1865      return 0;
1866  }
1867  
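      /*
       * Convert ancillary data (SCM_RIGHTS, SCM_CREDENTIALS, ...) from a
       * guest msghdr into the host control buffer set up by our sendmsg()
       * emulation; unrecognised types are copied through verbatim.
       */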
1868  static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1869                                             struct target_msghdr *target_msgh)
1870  {
1871      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872      abi_long msg_controllen;
1873      abi_ulong target_cmsg_addr;
1874      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875      socklen_t space = 0;
1876  
1877      msg_controllen = tswapal(target_msgh->msg_controllen);
1878      if (msg_controllen < sizeof (struct target_cmsghdr))
1879          goto the_end;
1880      target_cmsg_addr = tswapal(target_msgh->msg_control);
1881      target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1882      target_cmsg_start = target_cmsg;
1883      if (!target_cmsg)
1884          return -TARGET_EFAULT;
1885  
1886      while (cmsg && target_cmsg) {
1887          void *data = CMSG_DATA(cmsg);
1888          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889  
1890          int len = tswapal(target_cmsg->cmsg_len)
1891              - sizeof(struct target_cmsghdr);
1892  
1893          space += CMSG_SPACE(len);
1894          if (space > msgh->msg_controllen) {
1895              space -= CMSG_SPACE(len);
1896              /* This is a QEMU bug, since we allocated the payload
1897               * area ourselves (unlike overflow in host-to-target
1898               * conversion, which is just the guest giving us a buffer
1899               * that's too small). It can't happen for the payload types
1900               * we currently support; if it becomes an issue in future
1901               * we would need to improve our allocation strategy to
1902               * something more intelligent than "twice the size of the
1903               * target buffer we're reading from".
1904               */
1905              qemu_log_mask(LOG_UNIMP,
1906                            ("Unsupported ancillary data %d/%d: "
1907                             "unhandled msg size\n"),
1908                            tswap32(target_cmsg->cmsg_level),
1909                            tswap32(target_cmsg->cmsg_type));
1910              break;
1911          }
1912  
1913          if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1914              cmsg->cmsg_level = SOL_SOCKET;
1915          } else {
1916              cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1917          }
1918          cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1919          cmsg->cmsg_len = CMSG_LEN(len);
1920  
1921          if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1922              int *fd = (int *)data;
1923              int *target_fd = (int *)target_data;
1924              int i, numfds = len / sizeof(int);
1925  
1926              for (i = 0; i < numfds; i++) {
1927                  __get_user(fd[i], target_fd + i);
1928              }
1929          } else if (cmsg->cmsg_level == SOL_SOCKET
1930                 &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1931              struct ucred *cred = (struct ucred *)data;
1932              struct target_ucred *target_cred =
1933                  (struct target_ucred *)target_data;
1934  
1935              __get_user(cred->pid, &target_cred->pid);
1936              __get_user(cred->uid, &target_cred->uid);
1937              __get_user(cred->gid, &target_cred->gid);
1938          } else {
1939              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1940                            cmsg->cmsg_level, cmsg->cmsg_type);
1941              memcpy(data, target_data, len);
1942          }
1943  
1944          cmsg = CMSG_NXTHDR(msgh, cmsg);
1945          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1946                                           target_cmsg_start);
1947      }
1948      unlock_user(target_cmsg, target_cmsg_addr, 0);
1949   the_end:
1950      msgh->msg_controllen = space;
1951      return 0;
1952  }
1953  
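      /*
       * Convert ancillary data received from the host back into the guest
       * control buffer, truncating (and setting MSG_CTRUNC) when the guest
       * buffer is too small to hold the converted payload.
       */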
1954  static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1955                                             struct msghdr *msgh)
1956  {
1957      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1958      abi_long msg_controllen;
1959      abi_ulong target_cmsg_addr;
1960      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1961      socklen_t space = 0;
1962  
1963      msg_controllen = tswapal(target_msgh->msg_controllen);
1964      if (msg_controllen < sizeof (struct target_cmsghdr))
1965          goto the_end;
1966      target_cmsg_addr = tswapal(target_msgh->msg_control);
1967      target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1968      target_cmsg_start = target_cmsg;
1969      if (!target_cmsg)
1970          return -TARGET_EFAULT;
1971  
1972      while (cmsg && target_cmsg) {
1973          void *data = CMSG_DATA(cmsg);
1974          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1975  
1976          int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1977          int tgt_len, tgt_space;
1978  
1979          /* We never copy a half-header but may copy half-data;
1980           * this is Linux's behaviour in put_cmsg(). Note that
1981           * truncation here is a guest problem (which we report
1982           * to the guest via the CTRUNC bit), unlike truncation
1983           * in target_to_host_cmsg, which is a QEMU bug.
1984           */
1985          if (msg_controllen < sizeof(struct target_cmsghdr)) {
1986              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1987              break;
1988          }
1989  
1990          if (cmsg->cmsg_level == SOL_SOCKET) {
1991              target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1992          } else {
1993              target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1994          }
1995          target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1996  
1997          /* Payload types which need a different size of payload on
1998           * the target must adjust tgt_len here.
1999           */
2000          tgt_len = len;
2001          switch (cmsg->cmsg_level) {
2002          case SOL_SOCKET:
2003              switch (cmsg->cmsg_type) {
2004              case SO_TIMESTAMP:
2005                  tgt_len = sizeof(struct target_timeval);
2006                  break;
2007              default:
2008                  break;
2009              }
2010              break;
2011          default:
2012              break;
2013          }
2014  
2015          if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
2016              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2017              tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2018          }
2019  
2020          /* We must now copy-and-convert len bytes of payload
2021           * into tgt_len bytes of destination space. Bear in mind
2022           * that in both source and destination we may be dealing
2023           * with a truncated value!
2024           */
2025          switch (cmsg->cmsg_level) {
2026          case SOL_SOCKET:
2027              switch (cmsg->cmsg_type) {
2028              case SCM_RIGHTS:
2029              {
2030                  int *fd = (int *)data;
2031                  int *target_fd = (int *)target_data;
2032                  int i, numfds = tgt_len / sizeof(int);
2033  
2034                  for (i = 0; i < numfds; i++) {
2035                      __put_user(fd[i], target_fd + i);
2036                  }
2037                  break;
2038              }
2039              case SO_TIMESTAMP:
2040              {
2041                  struct timeval *tv = (struct timeval *)data;
2042                  struct target_timeval *target_tv =
2043                      (struct target_timeval *)target_data;
2044  
2045                  if (len != sizeof(struct timeval) ||
2046                      tgt_len != sizeof(struct target_timeval)) {
2047                      goto unimplemented;
2048                  }
2049  
2050                  /* copy struct timeval to target */
2051                  __put_user(tv->tv_sec, &target_tv->tv_sec);
2052                  __put_user(tv->tv_usec, &target_tv->tv_usec);
2053                  break;
2054              }
2055              case SCM_CREDENTIALS:
2056              {
2057                  struct ucred *cred = (struct ucred *)data;
2058                  struct target_ucred *target_cred =
2059                      (struct target_ucred *)target_data;
2060  
2061                  __put_user(cred->pid, &target_cred->pid);
2062                  __put_user(cred->uid, &target_cred->uid);
2063                  __put_user(cred->gid, &target_cred->gid);
2064                  break;
2065              }
2066              default:
2067                  goto unimplemented;
2068              }
2069              break;
2070  
2071          case SOL_IP:
2072              switch (cmsg->cmsg_type) {
2073              case IP_TTL:
2074              {
2075                  uint32_t *v = (uint32_t *)data;
2076                  uint32_t *t_int = (uint32_t *)target_data;
2077  
2078                  if (len != sizeof(uint32_t) ||
2079                      tgt_len != sizeof(uint32_t)) {
2080                      goto unimplemented;
2081                  }
2082                  __put_user(*v, t_int);
2083                  break;
2084              }
2085              case IP_RECVERR:
2086              {
2087                  struct errhdr_t {
2088                     struct sock_extended_err ee;
2089                     struct sockaddr_in offender;
2090                  };
2091                  struct errhdr_t *errh = (struct errhdr_t *)data;
2092                  struct errhdr_t *target_errh =
2093                      (struct errhdr_t *)target_data;
2094  
2095                  if (len != sizeof(struct errhdr_t) ||
2096                      tgt_len != sizeof(struct errhdr_t)) {
2097                      goto unimplemented;
2098                  }
2099                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2100                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2101                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2102                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2103                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2104                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2105                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2106                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
2107                      (void *) &errh->offender, sizeof(errh->offender));
2108                  break;
2109              }
2110              default:
2111                  goto unimplemented;
2112              }
2113              break;
2114  
2115          case SOL_IPV6:
2116              switch (cmsg->cmsg_type) {
2117              case IPV6_HOPLIMIT:
2118              {
2119                  uint32_t *v = (uint32_t *)data;
2120                  uint32_t *t_int = (uint32_t *)target_data;
2121  
2122                  if (len != sizeof(uint32_t) ||
2123                      tgt_len != sizeof(uint32_t)) {
2124                      goto unimplemented;
2125                  }
2126                  __put_user(*v, t_int);
2127                  break;
2128              }
2129              case IPV6_RECVERR:
2130              {
2131                  struct errhdr6_t {
2132                     struct sock_extended_err ee;
2133                     struct sockaddr_in6 offender;
2134                  };
2135                  struct errhdr6_t *errh = (struct errhdr6_t *)data;
2136                  struct errhdr6_t *target_errh =
2137                      (struct errhdr6_t *)target_data;
2138  
2139                  if (len != sizeof(struct errhdr6_t) ||
2140                      tgt_len != sizeof(struct errhdr6_t)) {
2141                      goto unimplemented;
2142                  }
2143                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2144                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2145                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2146                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2147                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2148                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2149                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2150                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
2151                      (void *) &errh->offender, sizeof(errh->offender));
2152                  break;
2153              }
2154              default:
2155                  goto unimplemented;
2156              }
2157              break;
2158  
2159          default:
2160          unimplemented:
2161              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2162                            cmsg->cmsg_level, cmsg->cmsg_type);
2163              memcpy(target_data, data, MIN(len, tgt_len));
2164              if (tgt_len > len) {
2165                  memset(target_data + len, 0, tgt_len - len);
2166              }
2167          }
2168  
2169          target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2170          tgt_space = TARGET_CMSG_SPACE(tgt_len);
2171          if (msg_controllen < tgt_space) {
2172              tgt_space = msg_controllen;
2173          }
2174          msg_controllen -= tgt_space;
2175          space += tgt_space;
2176          cmsg = CMSG_NXTHDR(msgh, cmsg);
2177          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2178                                           target_cmsg_start);
2179      }
2180      unlock_user(target_cmsg, target_cmsg_addr, space);
2181   the_end:
2182      target_msgh->msg_controllen = tswapal(space);
2183      return 0;
2184  }
2185  
2186  /* do_setsockopt() must return target values and target errnos. */
2187  static abi_long do_setsockopt(int sockfd, int level, int optname,
2188                                abi_ulong optval_addr, socklen_t optlen)
2189  {
2190      abi_long ret;
2191      int val;
2192      struct ip_mreqn *ip_mreq;
2193      struct ip_mreq_source *ip_mreq_source;
2194  
2195      switch(level) {
2196      case SOL_TCP:
2197      case SOL_UDP:
2198          /* TCP and UDP options all take an 'int' value.  */
2199          if (optlen < sizeof(uint32_t))
2200              return -TARGET_EINVAL;
2201  
2202          if (get_user_u32(val, optval_addr))
2203              return -TARGET_EFAULT;
2204          ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2205          break;
2206      case SOL_IP:
2207          switch(optname) {
2208          case IP_TOS:
2209          case IP_TTL:
2210          case IP_HDRINCL:
2211          case IP_ROUTER_ALERT:
2212          case IP_RECVOPTS:
2213          case IP_RETOPTS:
2214          case IP_PKTINFO:
2215          case IP_MTU_DISCOVER:
2216          case IP_RECVERR:
2217          case IP_RECVTTL:
2218          case IP_RECVTOS:
2219  #ifdef IP_FREEBIND
2220          case IP_FREEBIND:
2221  #endif
2222          case IP_MULTICAST_TTL:
2223          case IP_MULTICAST_LOOP:
2224              val = 0;
2225              if (optlen >= sizeof(uint32_t)) {
2226                  if (get_user_u32(val, optval_addr))
2227                      return -TARGET_EFAULT;
2228              } else if (optlen >= 1) {
2229                  if (get_user_u8(val, optval_addr))
2230                      return -TARGET_EFAULT;
2231              }
2232              ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2233              break;
2234          case IP_ADD_MEMBERSHIP:
2235          case IP_DROP_MEMBERSHIP:
2236              if (optlen < sizeof (struct target_ip_mreq) ||
2237                  optlen > sizeof (struct target_ip_mreqn))
2238                  return -TARGET_EINVAL;
2239  
2240              ip_mreq = (struct ip_mreqn *) alloca(optlen);
2241              target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2242              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2243              break;
2244  
2245          case IP_BLOCK_SOURCE:
2246          case IP_UNBLOCK_SOURCE:
2247          case IP_ADD_SOURCE_MEMBERSHIP:
2248          case IP_DROP_SOURCE_MEMBERSHIP:
2249              if (optlen != sizeof (struct target_ip_mreq_source))
2250                  return -TARGET_EINVAL;
2251  
2252              ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                  if (!ip_mreq_source) {
                      return -TARGET_EFAULT;
                  }
2253              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2254              unlock_user(ip_mreq_source, optval_addr, 0);
2255              break;
2256  
2257          default:
2258              goto unimplemented;
2259          }
2260          break;
2261      case SOL_IPV6:
2262          switch (optname) {
2263          case IPV6_MTU_DISCOVER:
2264          case IPV6_MTU:
2265          case IPV6_V6ONLY:
2266          case IPV6_RECVPKTINFO:
2267          case IPV6_UNICAST_HOPS:
2268          case IPV6_MULTICAST_HOPS:
2269          case IPV6_MULTICAST_LOOP:
2270          case IPV6_RECVERR:
2271          case IPV6_RECVHOPLIMIT:
2272          case IPV6_2292HOPLIMIT:
2273          case IPV6_CHECKSUM:
2274          case IPV6_ADDRFORM:
2275          case IPV6_2292PKTINFO:
2276          case IPV6_RECVTCLASS:
2277          case IPV6_RECVRTHDR:
2278          case IPV6_2292RTHDR:
2279          case IPV6_RECVHOPOPTS:
2280          case IPV6_2292HOPOPTS:
2281          case IPV6_RECVDSTOPTS:
2282          case IPV6_2292DSTOPTS:
2283          case IPV6_TCLASS:
2284          case IPV6_ADDR_PREFERENCES:
2285  #ifdef IPV6_RECVPATHMTU
2286          case IPV6_RECVPATHMTU:
2287  #endif
2288  #ifdef IPV6_TRANSPARENT
2289          case IPV6_TRANSPARENT:
2290  #endif
2291  #ifdef IPV6_FREEBIND
2292          case IPV6_FREEBIND:
2293  #endif
2294  #ifdef IPV6_RECVORIGDSTADDR
2295          case IPV6_RECVORIGDSTADDR:
2296  #endif
2297              val = 0;
2298              if (optlen < sizeof(uint32_t)) {
2299                  return -TARGET_EINVAL;
2300              }
2301              if (get_user_u32(val, optval_addr)) {
2302                  return -TARGET_EFAULT;
2303              }
2304              ret = get_errno(setsockopt(sockfd, level, optname,
2305                                         &val, sizeof(val)));
2306              break;
2307          case IPV6_PKTINFO:
2308          {
2309              struct in6_pktinfo pki;
2310  
2311              if (optlen < sizeof(pki)) {
2312                  return -TARGET_EINVAL;
2313              }
2314  
2315              if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2316                  return -TARGET_EFAULT;
2317              }
2318  
2319              pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2320  
2321              ret = get_errno(setsockopt(sockfd, level, optname,
2322                                         &pki, sizeof(pki)));
2323              break;
2324          }
2325          case IPV6_ADD_MEMBERSHIP:
2326          case IPV6_DROP_MEMBERSHIP:
2327          {
2328              struct ipv6_mreq ipv6mreq;
2329  
2330              if (optlen < sizeof(ipv6mreq)) {
2331                  return -TARGET_EINVAL;
2332              }
2333  
2334              if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2335                  return -TARGET_EFAULT;
2336              }
2337  
2338              ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2339  
2340              ret = get_errno(setsockopt(sockfd, level, optname,
2341                                         &ipv6mreq, sizeof(ipv6mreq)));
2342              break;
2343          }
2344          default:
2345              goto unimplemented;
2346          }
2347          break;
2348      case SOL_ICMPV6:
2349          switch (optname) {
2350          case ICMPV6_FILTER:
2351          {
2352              struct icmp6_filter icmp6f;
2353  
2354              if (optlen > sizeof(icmp6f)) {
2355                  optlen = sizeof(icmp6f);
2356              }
2357  
2358              if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2359                  return -TARGET_EFAULT;
2360              }
2361  
2362              for (val = 0; val < 8; val++) {
2363                  icmp6f.data[val] = tswap32(icmp6f.data[val]);
2364              }
2365  
2366              ret = get_errno(setsockopt(sockfd, level, optname,
2367                                         &icmp6f, optlen));
2368              break;
2369          }
2370          default:
2371              goto unimplemented;
2372          }
2373          break;
2374      case SOL_RAW:
2375          switch (optname) {
2376          case ICMP_FILTER:
2377          case IPV6_CHECKSUM:
2378              /* these take a u32 value */
2379              if (optlen < sizeof(uint32_t)) {
2380                  return -TARGET_EINVAL;
2381              }
2382  
2383              if (get_user_u32(val, optval_addr)) {
2384                  return -TARGET_EFAULT;
2385              }
2386              ret = get_errno(setsockopt(sockfd, level, optname,
2387                                         &val, sizeof(val)));
2388              break;
2389  
2390          default:
2391              goto unimplemented;
2392          }
2393          break;
2394  #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
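          /*
           * AF_ALG options: ALG_SET_KEY passes the key through as an opaque
           * byte string; ALG_SET_AEAD_AUTHSIZE carries its value in optlen.
           */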
2395      case SOL_ALG:
2396          switch (optname) {
2397          case ALG_SET_KEY:
2398          {
2399              char *alg_key = g_malloc(optlen);
2400  
2401              if (!alg_key) {
2402                  return -TARGET_ENOMEM;
2403              }
2404              if (copy_from_user(alg_key, optval_addr, optlen)) {
2405                  g_free(alg_key);
2406                  return -TARGET_EFAULT;
2407              }
2408              ret = get_errno(setsockopt(sockfd, level, optname,
2409                                         alg_key, optlen));
2410              g_free(alg_key);
2411              break;
2412          }
2413          case ALG_SET_AEAD_AUTHSIZE:
2414          {
2415              ret = get_errno(setsockopt(sockfd, level, optname,
2416                                         NULL, optlen));
2417              break;
2418          }
2419          default:
2420              goto unimplemented;
2421          }
2422          break;
2423  #endif
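          /*
           * SO_* option numbers differ between target architectures, so each
           * TARGET_SO_* constant is translated to the host value before the
           * host setsockopt() is called.
           */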
2424      case TARGET_SOL_SOCKET:
2425          switch (optname) {
2426          case TARGET_SO_RCVTIMEO:
2427          {
2428                  struct timeval tv;
2429  
2430                  optname = SO_RCVTIMEO;
2431  
2432  set_timeout:
2433                  if (optlen != sizeof(struct target_timeval)) {
2434                      return -TARGET_EINVAL;
2435                  }
2436  
2437                  if (copy_from_user_timeval(&tv, optval_addr)) {
2438                      return -TARGET_EFAULT;
2439                  }
2440  
2441                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2442                                  &tv, sizeof(tv)));
2443                  return ret;
2444          }
2445          case TARGET_SO_SNDTIMEO:
2446                  optname = SO_SNDTIMEO;
2447                  goto set_timeout;
2448          case TARGET_SO_ATTACH_FILTER:
2449          {
2450                  struct target_sock_fprog *tfprog;
2451                  struct target_sock_filter *tfilter;
2452                  struct sock_fprog fprog;
2453                  struct sock_filter *filter;
2454                  int i;
2455  
2456                  if (optlen != sizeof(*tfprog)) {
2457                      return -TARGET_EINVAL;
2458                  }
2459                  if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2460                      return -TARGET_EFAULT;
2461                  }
2462                  if (!lock_user_struct(VERIFY_READ, tfilter,
2463                                        tswapal(tfprog->filter), 0)) {
2464                      unlock_user_struct(tfprog, optval_addr, 1);
2465                      return -TARGET_EFAULT;
2466                  }
2467  
2468                  fprog.len = tswap16(tfprog->len);
2469                  filter = g_try_new(struct sock_filter, fprog.len);
2470                  if (filter == NULL) {
2471                      unlock_user_struct(tfilter, tfprog->filter, 1);
2472                      unlock_user_struct(tfprog, optval_addr, 1);
2473                      return -TARGET_ENOMEM;
2474                  }
2475                  for (i = 0; i < fprog.len; i++) {
2476                      filter[i].code = tswap16(tfilter[i].code);
2477                      filter[i].jt = tfilter[i].jt;
2478                      filter[i].jf = tfilter[i].jf;
2479                      filter[i].k = tswap32(tfilter[i].k);
2480                  }
2481                  fprog.filter = filter;
2482  
2483                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2484                                  SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2485                  g_free(filter);
2486  
2487                  unlock_user_struct(tfilter, tfprog->filter, 1);
2488                  unlock_user_struct(tfprog, optval_addr, 1);
2489                  return ret;
2490          }
2491          case TARGET_SO_BINDTODEVICE:
2492          {
2493                  char *dev_ifname, *addr_ifname;
2494  
2495                  if (optlen > IFNAMSIZ - 1) {
2496                      optlen = IFNAMSIZ - 1;
2497                  }
2498                  dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2499                  if (!dev_ifname) {
2500                      return -TARGET_EFAULT;
2501                  }
2502                  optname = SO_BINDTODEVICE;
2503                  addr_ifname = alloca(IFNAMSIZ);
2504                  memcpy(addr_ifname, dev_ifname, optlen);
2505                  addr_ifname[optlen] = 0;
2506                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2507                                             addr_ifname, optlen));
2508                  unlock_user(dev_ifname, optval_addr, 0);
2509                  return ret;
2510          }
2511          case TARGET_SO_LINGER:
2512          {
2513                  struct linger lg;
2514                  struct target_linger *tlg;
2515  
2516                  if (optlen != sizeof(struct target_linger)) {
2517                      return -TARGET_EINVAL;
2518                  }
2519                  if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2520                      return -TARGET_EFAULT;
2521                  }
2522                  __get_user(lg.l_onoff, &tlg->l_onoff);
2523                  __get_user(lg.l_linger, &tlg->l_linger);
2524                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2525                                  &lg, sizeof(lg)));
2526                  unlock_user_struct(tlg, optval_addr, 0);
2527                  return ret;
2528          }
2529              /* Options with 'int' argument.  */
2530          case TARGET_SO_DEBUG:
2531                  optname = SO_DEBUG;
2532                  break;
2533          case TARGET_SO_REUSEADDR:
2534                  optname = SO_REUSEADDR;
2535                  break;
2536  #ifdef SO_REUSEPORT
2537          case TARGET_SO_REUSEPORT:
2538                  optname = SO_REUSEPORT;
2539                  break;
2540  #endif
2541          case TARGET_SO_TYPE:
2542                  optname = SO_TYPE;
2543                  break;
2544          case TARGET_SO_ERROR:
2545                  optname = SO_ERROR;
2546                  break;
2547          case TARGET_SO_DONTROUTE:
2548                  optname = SO_DONTROUTE;
2549                  break;
2550          case TARGET_SO_BROADCAST:
2551                  optname = SO_BROADCAST;
2552                  break;
2553          case TARGET_SO_SNDBUF:
2554                  optname = SO_SNDBUF;
2555                  break;
2556          case TARGET_SO_SNDBUFFORCE:
2557                  optname = SO_SNDBUFFORCE;
2558                  break;
2559          case TARGET_SO_RCVBUF:
2560                  optname = SO_RCVBUF;
2561                  break;
2562          case TARGET_SO_RCVBUFFORCE:
2563                  optname = SO_RCVBUFFORCE;
2564                  break;
2565          case TARGET_SO_KEEPALIVE:
2566                  optname = SO_KEEPALIVE;
2567                  break;
2568          case TARGET_SO_OOBINLINE:
2569                  optname = SO_OOBINLINE;
2570                  break;
2571          case TARGET_SO_NO_CHECK:
2572                  optname = SO_NO_CHECK;
2573                  break;
2574          case TARGET_SO_PRIORITY:
2575                  optname = SO_PRIORITY;
2576                  break;
2577  #ifdef SO_BSDCOMPAT
2578          case TARGET_SO_BSDCOMPAT:
2579                  optname = SO_BSDCOMPAT;
2580                  break;
2581  #endif
2582          case TARGET_SO_PASSCRED:
2583                  optname = SO_PASSCRED;
2584                  break;
2585          case TARGET_SO_PASSSEC:
2586                  optname = SO_PASSSEC;
2587                  break;
2588          case TARGET_SO_TIMESTAMP:
2589                  optname = SO_TIMESTAMP;
2590                  break;
2591          case TARGET_SO_RCVLOWAT:
2592                  optname = SO_RCVLOWAT;
2593                  break;
2594          default:
2595              goto unimplemented;
2596          }
2597          if (optlen < sizeof(uint32_t))
2598              return -TARGET_EINVAL;
2599  
2600          if (get_user_u32(val, optval_addr))
2601              return -TARGET_EFAULT;
2602          ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2603          break;
2604  #ifdef SOL_NETLINK
2605      case SOL_NETLINK:
2606          switch (optname) {
2607          case NETLINK_PKTINFO:
2608          case NETLINK_ADD_MEMBERSHIP:
2609          case NETLINK_DROP_MEMBERSHIP:
2610          case NETLINK_BROADCAST_ERROR:
2611          case NETLINK_NO_ENOBUFS:
2612  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2613          case NETLINK_LISTEN_ALL_NSID:
2614          case NETLINK_CAP_ACK:
2615  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2616  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2617          case NETLINK_EXT_ACK:
2618  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2619  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2620          case NETLINK_GET_STRICT_CHK:
2621  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2622              break;
2623          default:
2624              goto unimplemented;
2625          }
2626          val = 0;
2627          if (optlen < sizeof(uint32_t)) {
2628              return -TARGET_EINVAL;
2629          }
2630          if (get_user_u32(val, optval_addr)) {
2631              return -TARGET_EFAULT;
2632          }
2633          ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2634                                     sizeof(val)));
2635          break;
2636  #endif /* SOL_NETLINK */
2637      default:
2638      unimplemented:
2639          qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2640                        level, optname);
2641          ret = -TARGET_ENOPROTOOPT;
2642      }
2643      return ret;
2644  }
2645  
2646  /* do_getsockopt() must return target values and target errnos. */
2647  static abi_long do_getsockopt(int sockfd, int level, int optname,
2648                                abi_ulong optval_addr, abi_ulong optlen)
2649  {
2650      abi_long ret;
2651      int len, val;
2652      socklen_t lv;
2653  
2654      switch(level) {
2655      case TARGET_SOL_SOCKET:
2656          level = SOL_SOCKET;
2657          switch (optname) {
2658          /* These don't just return a single integer */
2659          case TARGET_SO_PEERNAME:
2660              goto unimplemented;
2661          case TARGET_SO_RCVTIMEO: {
2662              struct timeval tv;
2663              socklen_t tvlen;
2664  
2665              optname = SO_RCVTIMEO;
2666  
2667  get_timeout:
2668              if (get_user_u32(len, optlen)) {
2669                  return -TARGET_EFAULT;
2670              }
2671              if (len < 0) {
2672                  return -TARGET_EINVAL;
2673              }
2674  
2675              tvlen = sizeof(tv);
2676              ret = get_errno(getsockopt(sockfd, level, optname,
2677                                         &tv, &tvlen));
2678              if (ret < 0) {
2679                  return ret;
2680              }
2681              if (len > sizeof(struct target_timeval)) {
2682                  len = sizeof(struct target_timeval);
2683              }
2684              if (copy_to_user_timeval(optval_addr, &tv)) {
2685                  return -TARGET_EFAULT;
2686              }
2687              if (put_user_u32(len, optlen)) {
2688                  return -TARGET_EFAULT;
2689              }
2690              break;
2691          }
2692          case TARGET_SO_SNDTIMEO:
2693              optname = SO_SNDTIMEO;
2694              goto get_timeout;
2695          case TARGET_SO_PEERCRED: {
2696              struct ucred cr;
2697              socklen_t crlen;
2698              struct target_ucred *tcr;
2699  
2700              if (get_user_u32(len, optlen)) {
2701                  return -TARGET_EFAULT;
2702              }
2703              if (len < 0) {
2704                  return -TARGET_EINVAL;
2705              }
2706  
2707              crlen = sizeof(cr);
2708              ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2709                                         &cr, &crlen));
2710              if (ret < 0) {
2711                  return ret;
2712              }
2713              if (len > crlen) {
2714                  len = crlen;
2715              }
2716              if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2717                  return -TARGET_EFAULT;
2718              }
2719              __put_user(cr.pid, &tcr->pid);
2720              __put_user(cr.uid, &tcr->uid);
2721              __put_user(cr.gid, &tcr->gid);
2722              unlock_user_struct(tcr, optval_addr, 1);
2723              if (put_user_u32(len, optlen)) {
2724                  return -TARGET_EFAULT;
2725              }
2726              break;
2727          }
2728          case TARGET_SO_PEERSEC: {
2729              char *name;
2730  
2731              if (get_user_u32(len, optlen)) {
2732                  return -TARGET_EFAULT;
2733              }
2734              if (len < 0) {
2735                  return -TARGET_EINVAL;
2736              }
2737              name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2738              if (!name) {
2739                  return -TARGET_EFAULT;
2740              }
2741              lv = len;
2742              ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2743                                         name, &lv));
2744              if (put_user_u32(lv, optlen)) {
2745                  ret = -TARGET_EFAULT;
2746              }
2747              unlock_user(name, optval_addr, lv);
2748              break;
2749          }
2750          case TARGET_SO_LINGER:
2751          {
2752              struct linger lg;
2753              socklen_t lglen;
2754              struct target_linger *tlg;
2755  
2756              if (get_user_u32(len, optlen)) {
2757                  return -TARGET_EFAULT;
2758              }
2759              if (len < 0) {
2760                  return -TARGET_EINVAL;
2761              }
2762  
2763              lglen = sizeof(lg);
2764              ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2765                                         &lg, &lglen));
2766              if (ret < 0) {
2767                  return ret;
2768              }
2769              if (len > lglen) {
2770                  len = lglen;
2771              }
2772              if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2773                  return -TARGET_EFAULT;
2774              }
2775              __put_user(lg.l_onoff, &tlg->l_onoff);
2776              __put_user(lg.l_linger, &tlg->l_linger);
2777              unlock_user_struct(tlg, optval_addr, 1);
2778              if (put_user_u32(len, optlen)) {
2779                  return -TARGET_EFAULT;
2780              }
2781              break;
2782          }
2783          /* Options with 'int' argument.  */
2784          case TARGET_SO_DEBUG:
2785              optname = SO_DEBUG;
2786              goto int_case;
2787          case TARGET_SO_REUSEADDR:
2788              optname = SO_REUSEADDR;
2789              goto int_case;
2790  #ifdef SO_REUSEPORT
2791          case TARGET_SO_REUSEPORT:
2792              optname = SO_REUSEPORT;
2793              goto int_case;
2794  #endif
2795          case TARGET_SO_TYPE:
2796              optname = SO_TYPE;
2797              goto int_case;
2798          case TARGET_SO_ERROR:
2799              optname = SO_ERROR;
2800              goto int_case;
2801          case TARGET_SO_DONTROUTE:
2802              optname = SO_DONTROUTE;
2803              goto int_case;
2804          case TARGET_SO_BROADCAST:
2805              optname = SO_BROADCAST;
2806              goto int_case;
2807          case TARGET_SO_SNDBUF:
2808              optname = SO_SNDBUF;
2809              goto int_case;
2810          case TARGET_SO_RCVBUF:
2811              optname = SO_RCVBUF;
2812              goto int_case;
2813          case TARGET_SO_KEEPALIVE:
2814              optname = SO_KEEPALIVE;
2815              goto int_case;
2816          case TARGET_SO_OOBINLINE:
2817              optname = SO_OOBINLINE;
2818              goto int_case;
2819          case TARGET_SO_NO_CHECK:
2820              optname = SO_NO_CHECK;
2821              goto int_case;
2822          case TARGET_SO_PRIORITY:
2823              optname = SO_PRIORITY;
2824              goto int_case;
2825  #ifdef SO_BSDCOMPAT
2826          case TARGET_SO_BSDCOMPAT:
2827              optname = SO_BSDCOMPAT;
2828              goto int_case;
2829  #endif
2830          case TARGET_SO_PASSCRED:
2831              optname = SO_PASSCRED;
2832              goto int_case;
2833          case TARGET_SO_TIMESTAMP:
2834              optname = SO_TIMESTAMP;
2835              goto int_case;
2836          case TARGET_SO_RCVLOWAT:
2837              optname = SO_RCVLOWAT;
2838              goto int_case;
2839          case TARGET_SO_ACCEPTCONN:
2840              optname = SO_ACCEPTCONN;
2841              goto int_case;
2842          case TARGET_SO_PROTOCOL:
2843              optname = SO_PROTOCOL;
2844              goto int_case;
2845          case TARGET_SO_DOMAIN:
2846              optname = SO_DOMAIN;
2847              goto int_case;
2848          default:
2849              goto int_case;
2850          }
2851          break;
2852      case SOL_TCP:
2853      case SOL_UDP:
2854          /* TCP and UDP options all take an 'int' value.  */
2855      int_case:
2856          if (get_user_u32(len, optlen))
2857              return -TARGET_EFAULT;
2858          if (len < 0)
2859              return -TARGET_EINVAL;
2860          lv = sizeof(lv);
2861          ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2862          if (ret < 0)
2863              return ret;
2864          if (optname == SO_TYPE) {
2865              val = host_to_target_sock_type(val);
2866          }
2867          if (len > lv)
2868              len = lv;
2869          if (len == 4) {
2870              if (put_user_u32(val, optval_addr))
2871                  return -TARGET_EFAULT;
2872          } else {
2873              if (put_user_u8(val, optval_addr))
2874                  return -TARGET_EFAULT;
2875          }
2876          if (put_user_u32(len, optlen))
2877              return -TARGET_EFAULT;
2878          break;
2879      case SOL_IP:
2880          switch(optname) {
2881          case IP_TOS:
2882          case IP_TTL:
2883          case IP_HDRINCL:
2884          case IP_ROUTER_ALERT:
2885          case IP_RECVOPTS:
2886          case IP_RETOPTS:
2887          case IP_PKTINFO:
2888          case IP_MTU_DISCOVER:
2889          case IP_RECVERR:
2890          case IP_RECVTOS:
2891  #ifdef IP_FREEBIND
2892          case IP_FREEBIND:
2893  #endif
2894          case IP_MULTICAST_TTL:
2895          case IP_MULTICAST_LOOP:
2896              if (get_user_u32(len, optlen))
2897                  return -TARGET_EFAULT;
2898              if (len < 0)
2899                  return -TARGET_EINVAL;
2900              lv = sizeof(lv);
2901              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2902              if (ret < 0)
2903                  return ret;
2904              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2905                  len = 1;
2906                  if (put_user_u32(len, optlen)
2907                      || put_user_u8(val, optval_addr))
2908                      return -TARGET_EFAULT;
2909              } else {
2910                  if (len > sizeof(int))
2911                      len = sizeof(int);
2912                  if (put_user_u32(len, optlen)
2913                      || put_user_u32(val, optval_addr))
2914                      return -TARGET_EFAULT;
2915              }
2916              break;
2917          default:
2918              ret = -TARGET_ENOPROTOOPT;
2919              break;
2920          }
2921          break;
2922      case SOL_IPV6:
2923          switch (optname) {
2924          case IPV6_MTU_DISCOVER:
2925          case IPV6_MTU:
2926          case IPV6_V6ONLY:
2927          case IPV6_RECVPKTINFO:
2928          case IPV6_UNICAST_HOPS:
2929          case IPV6_MULTICAST_HOPS:
2930          case IPV6_MULTICAST_LOOP:
2931          case IPV6_RECVERR:
2932          case IPV6_RECVHOPLIMIT:
2933          case IPV6_2292HOPLIMIT:
2934          case IPV6_CHECKSUM:
2935          case IPV6_ADDRFORM:
2936          case IPV6_2292PKTINFO:
2937          case IPV6_RECVTCLASS:
2938          case IPV6_RECVRTHDR:
2939          case IPV6_2292RTHDR:
2940          case IPV6_RECVHOPOPTS:
2941          case IPV6_2292HOPOPTS:
2942          case IPV6_RECVDSTOPTS:
2943          case IPV6_2292DSTOPTS:
2944          case IPV6_TCLASS:
2945          case IPV6_ADDR_PREFERENCES:
2946  #ifdef IPV6_RECVPATHMTU
2947          case IPV6_RECVPATHMTU:
2948  #endif
2949  #ifdef IPV6_TRANSPARENT
2950          case IPV6_TRANSPARENT:
2951  #endif
2952  #ifdef IPV6_FREEBIND
2953          case IPV6_FREEBIND:
2954  #endif
2955  #ifdef IPV6_RECVORIGDSTADDR
2956          case IPV6_RECVORIGDSTADDR:
2957  #endif
2958              if (get_user_u32(len, optlen))
2959                  return -TARGET_EFAULT;
2960              if (len < 0)
2961                  return -TARGET_EINVAL;
2962              lv = sizeof(lv);
2963              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2964              if (ret < 0)
2965                  return ret;
2966              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2967                  len = 1;
2968                  if (put_user_u32(len, optlen)
2969                      || put_user_u8(val, optval_addr))
2970                      return -TARGET_EFAULT;
2971              } else {
2972                  if (len > sizeof(int))
2973                      len = sizeof(int);
2974                  if (put_user_u32(len, optlen)
2975                      || put_user_u32(val, optval_addr))
2976                      return -TARGET_EFAULT;
2977              }
2978              break;
2979          default:
2980              ret = -TARGET_ENOPROTOOPT;
2981              break;
2982          }
2983          break;
2984  #ifdef SOL_NETLINK
2985      case SOL_NETLINK:
2986          switch (optname) {
2987          case NETLINK_PKTINFO:
2988          case NETLINK_BROADCAST_ERROR:
2989          case NETLINK_NO_ENOBUFS:
2990  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2991          case NETLINK_LISTEN_ALL_NSID:
2992          case NETLINK_CAP_ACK:
2993  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2994  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2995          case NETLINK_EXT_ACK:
2996  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2997  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2998          case NETLINK_GET_STRICT_CHK:
2999  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
3000              if (get_user_u32(len, optlen)) {
3001                  return -TARGET_EFAULT;
3002              }
3003              if (len != sizeof(val)) {
3004                  return -TARGET_EINVAL;
3005              }
3006              lv = len;
3007              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3008              if (ret < 0) {
3009                  return ret;
3010              }
3011              if (put_user_u32(lv, optlen)
3012                  || put_user_u32(val, optval_addr)) {
3013                  return -TARGET_EFAULT;
3014              }
3015              break;
3016  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
3017          case NETLINK_LIST_MEMBERSHIPS:
3018          {
3019              uint32_t *results;
3020              int i;
3021              if (get_user_u32(len, optlen)) {
3022                  return -TARGET_EFAULT;
3023              }
3024              if (len < 0) {
3025                  return -TARGET_EINVAL;
3026              }
3027              results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3028              if (!results && len > 0) {
3029                  return -TARGET_EFAULT;
3030              }
3031              lv = len;
3032              ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3033              if (ret < 0) {
3034                  unlock_user(results, optval_addr, 0);
3035                  return ret;
3036              }
3037              /* swap host endianness to target endianness. */
3038              for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3039                  results[i] = tswap32(results[i]);
3040              }
3041              unlock_user(results, optval_addr, 0);
3042              if (put_user_u32(lv, optlen)) {
3043                  return -TARGET_EFAULT;
3044              }
3045              break;
3046          }
3047  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3048          default:
3049              goto unimplemented;
3050          }
3051          break;
3052  #endif /* SOL_NETLINK */
3053      default:
3054      unimplemented:
3055          qemu_log_mask(LOG_UNIMP,
3056                        "getsockopt level=%d optname=%d not yet supported\n",
3057                        level, optname);
3058          ret = -TARGET_EOPNOTSUPP;
3059          break;
3060      }
3061      return ret;
3062  }
3063  
3064  /* Convert target low/high pair representing file offset into the host
3065   * low/high pair. This function doesn't handle offsets bigger than 64 bits
3066   * as the kernel doesn't handle them either.
3067   */
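/*
 * For example, with a 32-bit target on a 64-bit host, tlow=0x89abcdef
 * and thigh=0x01234567 merge into off=0x0123456789abcdef, so *hlow
 * receives the whole value and *hhigh becomes 0.  Shifting twice by
 * half the word width avoids an undefined full-width shift when the
 * target (or host) long is already 64 bits wide.
 */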
3068  static void target_to_host_low_high(abi_ulong tlow,
3069                                      abi_ulong thigh,
3070                                      unsigned long *hlow,
3071                                      unsigned long *hhigh)
3072  {
3073      uint64_t off = tlow |
3074          ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3075          TARGET_LONG_BITS / 2;
3076  
3077      *hlow = off;
3078      *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3079  }
3080  
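/*
 * Lock a guest iovec array (count entries at target_addr) and build the
 * corresponding host struct iovec array.  On success the host vector is
 * returned and must later be released with unlock_iovec(); on failure
 * NULL is returned with errno set so callers can report readv/writev
 * style errors.  A zero count also returns NULL, but with errno cleared.
 */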
3081  static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3082                                  abi_ulong count, int copy)
3083  {
3084      struct target_iovec *target_vec;
3085      struct iovec *vec;
3086      abi_ulong total_len, max_len;
3087      int i;
3088      int err = 0;
3089      bool bad_address = false;
3090  
3091      if (count == 0) {
3092          errno = 0;
3093          return NULL;
3094      }
3095      if (count > IOV_MAX) {
3096          errno = EINVAL;
3097          return NULL;
3098      }
3099  
3100      vec = g_try_new0(struct iovec, count);
3101      if (vec == NULL) {
3102          errno = ENOMEM;
3103          return NULL;
3104      }
3105  
3106      target_vec = lock_user(VERIFY_READ, target_addr,
3107                             count * sizeof(struct target_iovec), 1);
3108      if (target_vec == NULL) {
3109          err = EFAULT;
3110          goto fail2;
3111      }
3112  
3113      /* ??? If host page size > target page size, this will result in a
3114         value larger than what we can actually support.  */
3115      max_len = 0x7fffffff & TARGET_PAGE_MASK;
3116      total_len = 0;
3117  
3118      for (i = 0; i < count; i++) {
3119          abi_ulong base = tswapal(target_vec[i].iov_base);
3120          abi_long len = tswapal(target_vec[i].iov_len);
3121  
3122          if (len < 0) {
3123              err = EINVAL;
3124              goto fail;
3125          } else if (len == 0) {
3126              /* Zero length pointer is ignored.  */
3127              vec[i].iov_base = 0;
3128          } else {
3129              vec[i].iov_base = lock_user(type, base, len, copy);
3130              /* If the first buffer pointer is bad, this is a fault.  But
3131               * subsequent bad buffers will result in a partial write; this
3132               * is realized by filling the vector with null pointers and
3133               * zero lengths. */
3134              if (!vec[i].iov_base) {
3135                  if (i == 0) {
3136                      err = EFAULT;
3137                      goto fail;
3138                  } else {
3139                      bad_address = true;
3140                  }
3141              }
3142              if (bad_address) {
3143                  len = 0;
3144              }
3145              if (len > max_len - total_len) {
3146                  len = max_len - total_len;
3147              }
3148          }
3149          vec[i].iov_len = len;
3150          total_len += len;
3151      }
3152  
3153      unlock_user(target_vec, target_addr, 0);
3154      return vec;
3155  
3156   fail:
3157      while (--i >= 0) {
3158          if (tswapal(target_vec[i].iov_len) > 0) {
3159              unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3160          }
3161      }
3162      unlock_user(target_vec, target_addr, 0);
3163   fail2:
3164      g_free(vec);
3165      errno = err;
3166      return NULL;
3167  }
3168  
3169  static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3170                           abi_ulong count, int copy)
3171  {
3172      struct target_iovec *target_vec;
3173      int i;
3174  
3175      target_vec = lock_user(VERIFY_READ, target_addr,
3176                             count * sizeof(struct target_iovec), 1);
3177      if (target_vec) {
3178          for (i = 0; i < count; i++) {
3179              abi_ulong base = tswapal(target_vec[i].iov_base);
3180              abi_long len = tswapal(target_vec[i].iov_len);
3181              if (len < 0) {
3182                  break;
3183              }
3184              unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3185          }
3186          unlock_user(target_vec, target_addr, 0);
3187      }
3188  
3189      g_free(vec);
3190  }
3191  
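/*
 * Example: TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC maps to
 * SOCK_STREAM | SOCK_CLOEXEC on hosts that define SOCK_CLOEXEC.
 * TARGET_SOCK_NONBLOCK on a host without SOCK_NONBLOCK is left for
 * sock_flags_fixup() below to emulate via fcntl(O_NONBLOCK), or fails
 * with -TARGET_EINVAL if the host lacks O_NONBLOCK as well.
 */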
3192  static inline int target_to_host_sock_type(int *type)
3193  {
3194      int host_type = 0;
3195      int target_type = *type;
3196  
3197      switch (target_type & TARGET_SOCK_TYPE_MASK) {
3198      case TARGET_SOCK_DGRAM:
3199          host_type = SOCK_DGRAM;
3200          break;
3201      case TARGET_SOCK_STREAM:
3202          host_type = SOCK_STREAM;
3203          break;
3204      default:
3205          host_type = target_type & TARGET_SOCK_TYPE_MASK;
3206          break;
3207      }
3208      if (target_type & TARGET_SOCK_CLOEXEC) {
3209  #if defined(SOCK_CLOEXEC)
3210          host_type |= SOCK_CLOEXEC;
3211  #else
3212          return -TARGET_EINVAL;
3213  #endif
3214      }
3215      if (target_type & TARGET_SOCK_NONBLOCK) {
3216  #if defined(SOCK_NONBLOCK)
3217          host_type |= SOCK_NONBLOCK;
3218  #elif !defined(O_NONBLOCK)
3219          return -TARGET_EINVAL;
3220  #endif
3221      }
3222      *type = host_type;
3223      return 0;
3224  }
3225  
3226  /* Try to emulate socket type flags after socket creation.  */
3227  static int sock_flags_fixup(int fd, int target_type)
3228  {
3229  #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3230      if (target_type & TARGET_SOCK_NONBLOCK) {
3231          int flags = fcntl(fd, F_GETFL);
3232          if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3233              close(fd);
3234              return -TARGET_EINVAL;
3235          }
3236      }
3237  #endif
3238      return fd;
3239  }
3240  
3241  /* do_socket() Must return target values and target errnos. */
3242  static abi_long do_socket(int domain, int type, int protocol)
3243  {
3244      int target_type = type;
3245      int ret;
3246  
3247      ret = target_to_host_sock_type(&type);
3248      if (ret) {
3249          return ret;
3250      }
3251  
3252      if (domain == PF_NETLINK && !(
3253  #ifdef CONFIG_RTNETLINK
3254           protocol == NETLINK_ROUTE ||
3255  #endif
3256           protocol == NETLINK_KOBJECT_UEVENT ||
3257           protocol == NETLINK_AUDIT)) {
3258          return -TARGET_EPROTONOSUPPORT;
3259      }
3260  
3261      if (domain == AF_PACKET ||
3262          (domain == AF_INET && type == SOCK_PACKET)) {
3263          protocol = tswap16(protocol);
3264      }
3265  
3266      ret = get_errno(socket(domain, type, protocol));
3267      if (ret >= 0) {
3268          ret = sock_flags_fixup(ret, target_type);
3269          if (type == SOCK_PACKET) {
3270              /* Handle an obsolete case: if the socket type is
3271               * SOCK_PACKET, bind by name.
3272               */
3273              fd_trans_register(ret, &target_packet_trans);
3274          } else if (domain == PF_NETLINK) {
3275              switch (protocol) {
3276  #ifdef CONFIG_RTNETLINK
3277              case NETLINK_ROUTE:
3278                  fd_trans_register(ret, &target_netlink_route_trans);
3279                  break;
3280  #endif
3281              case NETLINK_KOBJECT_UEVENT:
3282                  /* nothing to do: messages are strings */
3283                  break;
3284              case NETLINK_AUDIT:
3285                  fd_trans_register(ret, &target_netlink_audit_trans);
3286                  break;
3287              default:
3288                  g_assert_not_reached();
3289              }
3290          }
3291      }
3292      return ret;
3293  }
3294  
3295  /* do_bind() Must return target values and target errnos. */
3296  static abi_long do_bind(int sockfd, abi_ulong target_addr,
3297                          socklen_t addrlen)
3298  {
3299      void *addr;
3300      abi_long ret;
3301  
3302      if ((int)addrlen < 0) {
3303          return -TARGET_EINVAL;
3304      }
3305  
3306      addr = alloca(addrlen+1);
3307  
3308      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3309      if (ret)
3310          return ret;
3311  
3312      return get_errno(bind(sockfd, addr, addrlen));
3313  }
3314  
3315  /* do_connect() Must return target values and target errnos. */
3316  static abi_long do_connect(int sockfd, abi_ulong target_addr,
3317                             socklen_t addrlen)
3318  {
3319      void *addr;
3320      abi_long ret;
3321  
3322      if ((int)addrlen < 0) {
3323          return -TARGET_EINVAL;
3324      }
3325  
3326      addr = alloca(addrlen+1);
3327  
3328      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3329      if (ret)
3330          return ret;
3331  
3332      return get_errno(safe_connect(sockfd, addr, addrlen));
3333  }
3334  
3335  /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3336  static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3337                                        int flags, int send)
3338  {
3339      abi_long ret, len;
3340      struct msghdr msg;
3341      abi_ulong count;
3342      struct iovec *vec;
3343      abi_ulong target_vec;
3344  
3345      if (msgp->msg_name) {
3346          msg.msg_namelen = tswap32(msgp->msg_namelen);
3347          msg.msg_name = alloca(msg.msg_namelen+1);
3348          ret = target_to_host_sockaddr(fd, msg.msg_name,
3349                                        tswapal(msgp->msg_name),
3350                                        msg.msg_namelen);
3351          if (ret == -TARGET_EFAULT) {
3352              /* For connected sockets msg_name and msg_namelen must
3353               * be ignored, so returning EFAULT immediately is wrong.
3354               * Instead, pass a bad msg_name to the host kernel, and
3355               * let it decide whether to return EFAULT or not.
3356               */
3357              msg.msg_name = (void *)-1;
3358          } else if (ret) {
3359              goto out2;
3360          }
3361      } else {
3362          msg.msg_name = NULL;
3363          msg.msg_namelen = 0;
3364      }
3365      msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3366      msg.msg_control = alloca(msg.msg_controllen);
3367      memset(msg.msg_control, 0, msg.msg_controllen);
3368  
3369      msg.msg_flags = tswap32(msgp->msg_flags);
3370  
3371      count = tswapal(msgp->msg_iovlen);
3372      target_vec = tswapal(msgp->msg_iov);
3373  
3374      if (count > IOV_MAX) {
3375          /* sendmsg/recvmsg return a different errno for this condition than
3376           * readv/writev, so we must catch it here before lock_iovec() does.
3377           */
3378          ret = -TARGET_EMSGSIZE;
3379          goto out2;
3380      }
3381  
3382      vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3383                       target_vec, count, send);
3384      if (vec == NULL) {
3385          ret = -host_to_target_errno(errno);
3386          goto out2;
3387      }
3388      msg.msg_iovlen = count;
3389      msg.msg_iov = vec;
3390  
3391      if (send) {
3392          if (fd_trans_target_to_host_data(fd)) {
3393              void *host_msg;
3394  
3395              host_msg = g_malloc(msg.msg_iov->iov_len);
3396              memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3397              ret = fd_trans_target_to_host_data(fd)(host_msg,
3398                                                     msg.msg_iov->iov_len);
3399              if (ret >= 0) {
3400                  msg.msg_iov->iov_base = host_msg;
3401                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3402              }
3403              g_free(host_msg);
3404          } else {
3405              ret = target_to_host_cmsg(&msg, msgp);
3406              if (ret == 0) {
3407                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3408              }
3409          }
3410      } else {
3411          ret = get_errno(safe_recvmsg(fd, &msg, flags));
3412          if (!is_error(ret)) {
3413              len = ret;
3414              if (fd_trans_host_to_target_data(fd)) {
3415                  ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3416                                                 MIN(msg.msg_iov->iov_len, len));
3417              } else {
3418                  ret = host_to_target_cmsg(msgp, &msg);
3419              }
3420              if (!is_error(ret)) {
3421                  msgp->msg_namelen = tswap32(msg.msg_namelen);
3422                  msgp->msg_flags = tswap32(msg.msg_flags);
3423                  if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3424                      ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3425                                      msg.msg_name, msg.msg_namelen);
3426                      if (ret) {
3427                          goto out;
3428                      }
3429                  }
3430  
3431                  ret = len;
3432              }
3433          }
3434      }
3435  
3436  out:
3437      unlock_iovec(vec, target_vec, count, !send);
3438  out2:
3439      return ret;
3440  }
3441  
3442  static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3443                                 int flags, int send)
3444  {
3445      abi_long ret;
3446      struct target_msghdr *msgp;
3447  
3448      if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3449                            msgp,
3450                            target_msg,
3451                            send ? 1 : 0)) {
3452          return -TARGET_EFAULT;
3453      }
3454      ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3455      unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3456      return ret;
3457  }
3458  
3459  /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3460   * so it might not have this *mmsg-specific flag either.
3461   */
3462  #ifndef MSG_WAITFORONE
3463  #define MSG_WAITFORONE 0x10000
3464  #endif
3465  
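/*
 * Emulate sendmmsg/recvmmsg on top of do_sendrecvmsg_locked(): vlen is
 * clamped to UIO_MAXIOV, each msg_hdr is processed in turn and its
 * msg_len filled in, MSG_WAITFORONE switches to non-blocking mode after
 * the first datagram, and the call returns the number of datagrams
 * handled (or the first error if none were).
 */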
3466  static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3467                                  unsigned int vlen, unsigned int flags,
3468                                  int send)
3469  {
3470      struct target_mmsghdr *mmsgp;
3471      abi_long ret = 0;
3472      int i;
3473  
3474      if (vlen > UIO_MAXIOV) {
3475          vlen = UIO_MAXIOV;
3476      }
3477  
3478      mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3479      if (!mmsgp) {
3480          return -TARGET_EFAULT;
3481      }
3482  
3483      for (i = 0; i < vlen; i++) {
3484          ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3485          if (is_error(ret)) {
3486              break;
3487          }
3488          mmsgp[i].msg_len = tswap32(ret);
3489          /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3490          if (flags & MSG_WAITFORONE) {
3491              flags |= MSG_DONTWAIT;
3492          }
3493      }
3494  
3495      unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3496  
3497      /* Return number of datagrams sent if we sent any at all;
3498       * otherwise return the error.
3499       */
3500      if (i) {
3501          return i;
3502      }
3503      return ret;
3504  }
3505  
3506  /* do_accept4() Must return target values and target errnos. */
3507  static abi_long do_accept4(int fd, abi_ulong target_addr,
3508                             abi_ulong target_addrlen_addr, int flags)
3509  {
3510      socklen_t addrlen, ret_addrlen;
3511      void *addr;
3512      abi_long ret;
3513      int host_flags;
3514  
3515      host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3516  
3517      if (target_addr == 0) {
3518          return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3519      }
3520  
3521      /* Linux returns EFAULT if the addrlen pointer is invalid */
3522      if (get_user_u32(addrlen, target_addrlen_addr))
3523          return -TARGET_EFAULT;
3524  
3525      if ((int)addrlen < 0) {
3526          return -TARGET_EINVAL;
3527      }
3528  
3529      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3530          return -TARGET_EFAULT;
3531      }
3532  
3533      addr = alloca(addrlen);
3534  
3535      ret_addrlen = addrlen;
3536      ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3537      if (!is_error(ret)) {
3538          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3539          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3540              ret = -TARGET_EFAULT;
3541          }
3542      }
3543      return ret;
3544  }
3545  
3546  /* do_getpeername() Must return target values and target errnos. */
3547  static abi_long do_getpeername(int fd, abi_ulong target_addr,
3548                                 abi_ulong target_addrlen_addr)
3549  {
3550      socklen_t addrlen, ret_addrlen;
3551      void *addr;
3552      abi_long ret;
3553  
3554      if (get_user_u32(addrlen, target_addrlen_addr))
3555          return -TARGET_EFAULT;
3556  
3557      if ((int)addrlen < 0) {
3558          return -TARGET_EINVAL;
3559      }
3560  
3561      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3562          return -TARGET_EFAULT;
3563      }
3564  
3565      addr = alloca(addrlen);
3566  
3567      ret_addrlen = addrlen;
3568      ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3569      if (!is_error(ret)) {
3570          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3571          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3572              ret = -TARGET_EFAULT;
3573          }
3574      }
3575      return ret;
3576  }
3577  
3578  /* do_getsockname() Must return target values and target errnos. */
3579  static abi_long do_getsockname(int fd, abi_ulong target_addr,
3580                                 abi_ulong target_addrlen_addr)
3581  {
3582      socklen_t addrlen, ret_addrlen;
3583      void *addr;
3584      abi_long ret;
3585  
3586      if (get_user_u32(addrlen, target_addrlen_addr))
3587          return -TARGET_EFAULT;
3588  
3589      if ((int)addrlen < 0) {
3590          return -TARGET_EINVAL;
3591      }
3592  
3593      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3594          return -TARGET_EFAULT;
3595      }
3596  
3597      addr = alloca(addrlen);
3598  
3599      ret_addrlen = addrlen;
3600      ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3601      if (!is_error(ret)) {
3602          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3603          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3604              ret = -TARGET_EFAULT;
3605          }
3606      }
3607      return ret;
3608  }
3609  
3610  /* do_socketpair() Must return target values and target errnos. */
3611  static abi_long do_socketpair(int domain, int type, int protocol,
3612                                abi_ulong target_tab_addr)
3613  {
3614      int tab[2];
3615      abi_long ret;
3616  
3617      target_to_host_sock_type(&type);
3618  
3619      ret = get_errno(socketpair(domain, type, protocol, tab));
3620      if (!is_error(ret)) {
3621          if (put_user_s32(tab[0], target_tab_addr)
3622              || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3623              ret = -TARGET_EFAULT;
3624      }
3625      return ret;
3626  }
3627  
3628  /* do_sendto() Must return target values and target errnos. */
3629  static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3630                            abi_ulong target_addr, socklen_t addrlen)
3631  {
3632      void *addr;
3633      void *host_msg;
3634      void *copy_msg = NULL;
3635      abi_long ret;
3636  
3637      if ((int)addrlen < 0) {
3638          return -TARGET_EINVAL;
3639      }
3640  
3641      host_msg = lock_user(VERIFY_READ, msg, len, 1);
3642      if (!host_msg)
3643          return -TARGET_EFAULT;
3644      if (fd_trans_target_to_host_data(fd)) {
3645          copy_msg = host_msg;
3646          host_msg = g_malloc(len);
3647          memcpy(host_msg, copy_msg, len);
3648          ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3649          if (ret < 0) {
3650              goto fail;
3651          }
3652      }
3653      if (target_addr) {
3654          addr = alloca(addrlen+1);
3655          ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3656          if (ret) {
3657              goto fail;
3658          }
3659          ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3660      } else {
3661          ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3662      }
3663  fail:
3664      if (copy_msg) {
3665          g_free(host_msg);
3666          host_msg = copy_msg;
3667      }
3668      unlock_user(host_msg, msg, 0);
3669      return ret;
3670  }
3671  
3672  /* do_recvfrom() Must return target values and target errnos. */
3673  static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3674                              abi_ulong target_addr,
3675                              abi_ulong target_addrlen)
3676  {
3677      socklen_t addrlen, ret_addrlen;
3678      void *addr;
3679      void *host_msg;
3680      abi_long ret;
3681  
3682      if (!msg) {
3683          host_msg = NULL;
3684      } else {
3685          host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3686          if (!host_msg) {
3687              return -TARGET_EFAULT;
3688          }
3689      }
3690      if (target_addr) {
3691          if (get_user_u32(addrlen, target_addrlen)) {
3692              ret = -TARGET_EFAULT;
3693              goto fail;
3694          }
3695          if ((int)addrlen < 0) {
3696              ret = -TARGET_EINVAL;
3697              goto fail;
3698          }
3699          addr = alloca(addrlen);
3700          ret_addrlen = addrlen;
3701          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3702                                        addr, &ret_addrlen));
3703      } else {
3704          addr = NULL; /* To keep compiler quiet.  */
3705          addrlen = 0; /* To keep compiler quiet.  */
3706          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3707      }
3708      if (!is_error(ret)) {
3709          if (fd_trans_host_to_target_data(fd)) {
3710              abi_long trans;
3711              trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3712              if (is_error(trans)) {
3713                  ret = trans;
3714                  goto fail;
3715              }
3716          }
3717          if (target_addr) {
3718              host_to_target_sockaddr(target_addr, addr,
3719                                      MIN(addrlen, ret_addrlen));
3720              if (put_user_u32(ret_addrlen, target_addrlen)) {
3721                  ret = -TARGET_EFAULT;
3722                  goto fail;
3723              }
3724          }
3725          unlock_user(host_msg, msg, len);
3726      } else {
3727  fail:
3728          unlock_user(host_msg, msg, 0);
3729      }
3730      return ret;
3731  }
3732  
3733  #ifdef TARGET_NR_socketcall
3734  /* do_socketcall() must return target values and target errnos. */
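/*
 * For example, a guest socketcall(TARGET_SYS_SOCKET, args) fetches
 * nargs[TARGET_SYS_SOCKET] == 3 abi_longs (domain, type, protocol)
 * from the guest argument array at vptr and forwards them to
 * do_socket().
 */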
3735  static abi_long do_socketcall(int num, abi_ulong vptr)
3736  {
3737      static const unsigned nargs[] = { /* number of arguments per operation */
3738          [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3739          [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3740          [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3741          [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3742          [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3743          [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3744          [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3745          [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3746          [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3747          [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3748          [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3749          [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3750          [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3751          [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3752          [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3753          [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3754          [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3755          [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3756          [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3757          [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3758      };
3759      abi_long a[6]; /* max 6 args */
3760      unsigned i;
3761  
3762      /* check the range of the first argument num */
3763      /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3764      if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3765          return -TARGET_EINVAL;
3766      }
3767      /* ensure we have space for args */
3768      if (nargs[num] > ARRAY_SIZE(a)) {
3769          return -TARGET_EINVAL;
3770      }
3771      /* collect the arguments in a[] according to nargs[] */
3772      for (i = 0; i < nargs[num]; ++i) {
3773          if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3774              return -TARGET_EFAULT;
3775          }
3776      }
3777      /* now that we have the args, invoke the appropriate underlying function */
3778      switch (num) {
3779      case TARGET_SYS_SOCKET: /* domain, type, protocol */
3780          return do_socket(a[0], a[1], a[2]);
3781      case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3782          return do_bind(a[0], a[1], a[2]);
3783      case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3784          return do_connect(a[0], a[1], a[2]);
3785      case TARGET_SYS_LISTEN: /* sockfd, backlog */
3786          return get_errno(listen(a[0], a[1]));
3787      case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3788          return do_accept4(a[0], a[1], a[2], 0);
3789      case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3790          return do_getsockname(a[0], a[1], a[2]);
3791      case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3792          return do_getpeername(a[0], a[1], a[2]);
3793      case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3794          return do_socketpair(a[0], a[1], a[2], a[3]);
3795      case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3796          return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3797      case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3798          return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3799      case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3800          return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3801      case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3802          return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3803      case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3804          return get_errno(shutdown(a[0], a[1]));
3805      case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3806          return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3807      case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3808          return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3809      case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3810          return do_sendrecvmsg(a[0], a[1], a[2], 1);
3811      case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3812          return do_sendrecvmsg(a[0], a[1], a[2], 0);
3813      case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3814          return do_accept4(a[0], a[1], a[2], a[3]);
3815      case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3816          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3817      case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3818          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3819      default:
3820          qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3821          return -TARGET_EINVAL;
3822      }
3823  }
3824  #endif
3825  
3826  #define N_SHM_REGIONS	32
3827  
3828  static struct shm_region {
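/*
 * Book-keeping for guest SysV shared memory: each in-use slot records
 * the guest start address and size of a segment attached with shmat(),
 * so the matching detach can find it again.
 */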
3829      abi_ulong start;
3830      abi_ulong size;
3831      bool in_use;
3832  } shm_regions[N_SHM_REGIONS];
3833  
3834  #ifndef TARGET_SEMID64_DS
3835  /* asm-generic version of this struct */
3836  struct target_semid64_ds
3837  {
3838    struct target_ipc_perm sem_perm;
3839    abi_ulong sem_otime;
3840  #if TARGET_ABI_BITS == 32
3841    abi_ulong __unused1;
3842  #endif
3843    abi_ulong sem_ctime;
3844  #if TARGET_ABI_BITS == 32
3845    abi_ulong __unused2;
3846  #endif
3847    abi_ulong sem_nsems;
3848    abi_ulong __unused3;
3849    abi_ulong __unused4;
3850  };
3851  #endif
3852  
3853  static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3854                                                 abi_ulong target_addr)
3855  {
3856      struct target_ipc_perm *target_ip;
3857      struct target_semid64_ds *target_sd;
3858  
3859      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3860          return -TARGET_EFAULT;
3861      target_ip = &(target_sd->sem_perm);
3862      host_ip->__key = tswap32(target_ip->__key);
3863      host_ip->uid = tswap32(target_ip->uid);
3864      host_ip->gid = tswap32(target_ip->gid);
3865      host_ip->cuid = tswap32(target_ip->cuid);
3866      host_ip->cgid = tswap32(target_ip->cgid);
3867  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3868      host_ip->mode = tswap32(target_ip->mode);
3869  #else
3870      host_ip->mode = tswap16(target_ip->mode);
3871  #endif
3872  #if defined(TARGET_PPC)
3873      host_ip->__seq = tswap32(target_ip->__seq);
3874  #else
3875      host_ip->__seq = tswap16(target_ip->__seq);
3876  #endif
3877      unlock_user_struct(target_sd, target_addr, 0);
3878      return 0;
3879  }
3880  
3881  static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3882                                                 struct ipc_perm *host_ip)
3883  {
3884      struct target_ipc_perm *target_ip;
3885      struct target_semid64_ds *target_sd;
3886  
3887      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3888          return -TARGET_EFAULT;
3889      target_ip = &(target_sd->sem_perm);
3890      target_ip->__key = tswap32(host_ip->__key);
3891      target_ip->uid = tswap32(host_ip->uid);
3892      target_ip->gid = tswap32(host_ip->gid);
3893      target_ip->cuid = tswap32(host_ip->cuid);
3894      target_ip->cgid = tswap32(host_ip->cgid);
3895  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3896      target_ip->mode = tswap32(host_ip->mode);
3897  #else
3898      target_ip->mode = tswap16(host_ip->mode);
3899  #endif
3900  #if defined(TARGET_PPC)
3901      target_ip->__seq = tswap32(host_ip->__seq);
3902  #else
3903      target_ip->__seq = tswap16(host_ip->__seq);
3904  #endif
3905      unlock_user_struct(target_sd, target_addr, 1);
3906      return 0;
3907  }
3908  
3909  static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3910                                                 abi_ulong target_addr)
3911  {
3912      struct target_semid64_ds *target_sd;
3913  
3914      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3915          return -TARGET_EFAULT;
3916      if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3917          return -TARGET_EFAULT;
3918      host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3919      host_sd->sem_otime = tswapal(target_sd->sem_otime);
3920      host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3921      unlock_user_struct(target_sd, target_addr, 0);
3922      return 0;
3923  }
3924  
3925  static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3926                                                 struct semid_ds *host_sd)
3927  {
3928      struct target_semid64_ds *target_sd;
3929  
3930      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3931          return -TARGET_EFAULT;
3932      if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3933          return -TARGET_EFAULT;
3934      target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3935      target_sd->sem_otime = tswapal(host_sd->sem_otime);
3936      target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3937      unlock_user_struct(target_sd, target_addr, 1);
3938      return 0;
3939  }
3940  
3941  struct target_seminfo {
3942      int semmap;
3943      int semmni;
3944      int semmns;
3945      int semmnu;
3946      int semmsl;
3947      int semopm;
3948      int semume;
3949      int semusz;
3950      int semvmx;
3951      int semaem;
3952  };
3953  
3954  static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3955                                                struct seminfo *host_seminfo)
3956  {
3957      struct target_seminfo *target_seminfo;
3958      if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3959          return -TARGET_EFAULT;
3960      __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3961      __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3962      __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3963      __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3964      __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3965      __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3966      __put_user(host_seminfo->semume, &target_seminfo->semume);
3967      __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3968      __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3969      __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3970      unlock_user_struct(target_seminfo, target_addr, 1);
3971      return 0;
3972  }
3973  
3974  union semun {
3975  	int val;
3976  	struct semid_ds *buf;
3977  	unsigned short *array;
3978  	struct seminfo *__buf;
3979  };
3980  
3981  union target_semun {
3982  	int val;
3983  	abi_ulong buf;
3984  	abi_ulong array;
3985  	abi_ulong __buf;
3986  };
3987  
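/*
 * Helpers for semctl(GETALL/SETALL): query the semaphore count with
 * IPC_STAT, then copy the guest's array of unsigned shorts into a newly
 * allocated host array and back again.
 */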
3988  static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3989                                                 abi_ulong target_addr)
3990  {
3991      int nsems;
3992      unsigned short *array;
3993      union semun semun;
3994      struct semid_ds semid_ds;
3995      int i, ret;
3996  
3997      semun.buf = &semid_ds;
3998  
3999      ret = semctl(semid, 0, IPC_STAT, semun);
4000      if (ret == -1)
4001          return get_errno(ret);
4002  
4003      nsems = semid_ds.sem_nsems;
4004  
4005      *host_array = g_try_new(unsigned short, nsems);
4006      if (!*host_array) {
4007          return -TARGET_ENOMEM;
4008      }
4009      array = lock_user(VERIFY_READ, target_addr,
4010                        nsems*sizeof(unsigned short), 1);
4011      if (!array) {
4012          g_free(*host_array);
4013          return -TARGET_EFAULT;
4014      }
4015  
4016      for(i=0; i<nsems; i++) {
4017          __get_user((*host_array)[i], &array[i]);
4018      }
4019      unlock_user(array, target_addr, 0);
4020  
4021      return 0;
4022  }
4023  
4024  static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4025                                                 unsigned short **host_array)
4026  {
4027      int nsems;
4028      unsigned short *array;
4029      union semun semun;
4030      struct semid_ds semid_ds;
4031      int i, ret;
4032  
4033      semun.buf = &semid_ds;
4034  
4035      ret = semctl(semid, 0, IPC_STAT, semun);
4036      if (ret == -1)
4037          return get_errno(ret);
4038  
4039      nsems = semid_ds.sem_nsems;
4040  
4041      array = lock_user(VERIFY_WRITE, target_addr,
4042                        nsems*sizeof(unsigned short), 0);
4043      if (!array)
4044          return -TARGET_EFAULT;
4045  
4046      for(i=0; i<nsems; i++) {
4047          __put_user((*host_array)[i], &array[i]);
4048      }
4049      g_free(*host_array);
4050      unlock_user(array, target_addr, 1);
4051  
4052      return 0;
4053  }
4054  
4055  static inline abi_long do_semctl(int semid, int semnum, int cmd,
4056                                   abi_ulong target_arg)
4057  {
4058      union target_semun target_su = { .buf = target_arg };
4059      union semun arg;
4060      struct semid_ds dsarg;
4061      unsigned short *array = NULL;
4062      struct seminfo seminfo;
4063      abi_long ret = -TARGET_EINVAL;
4064      abi_long err;
4065      cmd &= 0xff;
4066  
4067      switch( cmd ) {
4068  	case GETVAL:
4069  	case SETVAL:
4070              /* In 64 bit cross-endian situations, we will erroneously pick up
4071               * the wrong half of the union for the "val" element.  To rectify
4072               * this, the entire 8-byte structure is byteswapped, followed by
4073               * a swap of the 4 byte val field. In other cases, the data is
4074               * already in proper host byte order. */
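            /* Worked example (64-bit big-endian guest, little-endian host):
             * the guest stores val = 0x12345678 in the first four bytes of
             * its 8-byte union, which arrive here as the most-significant
             * half of target_su.buf while target_su.val would read the
             * other half.  tswapal() brings those bytes back into the half
             * that .val reads, still in guest byte order, and the following
             * tswap32() recovers 0x12345678. */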
4075  	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4076  		target_su.buf = tswapal(target_su.buf);
4077  		arg.val = tswap32(target_su.val);
4078  	    } else {
4079  		arg.val = target_su.val;
4080  	    }
4081              ret = get_errno(semctl(semid, semnum, cmd, arg));
4082              break;
4083  	case GETALL:
4084  	case SETALL:
4085              err = target_to_host_semarray(semid, &array, target_su.array);
4086              if (err)
4087                  return err;
4088              arg.array = array;
4089              ret = get_errno(semctl(semid, semnum, cmd, arg));
4090              err = host_to_target_semarray(semid, target_su.array, &array);
4091              if (err)
4092                  return err;
4093              break;
4094  	case IPC_STAT:
4095  	case IPC_SET:
4096  	case SEM_STAT:
4097              err = target_to_host_semid_ds(&dsarg, target_su.buf);
4098              if (err)
4099                  return err;
4100              arg.buf = &dsarg;
4101              ret = get_errno(semctl(semid, semnum, cmd, arg));
4102              err = host_to_target_semid_ds(target_su.buf, &dsarg);
4103              if (err)
4104                  return err;
4105              break;
4106  	case IPC_INFO:
4107  	case SEM_INFO:
4108              arg.__buf = &seminfo;
4109              ret = get_errno(semctl(semid, semnum, cmd, arg));
4110              err = host_to_target_seminfo(target_su.__buf, &seminfo);
4111              if (err)
4112                  return err;
4113              break;
4114  	case IPC_RMID:
4115  	case GETPID:
4116  	case GETNCNT:
4117  	case GETZCNT:
4118              ret = get_errno(semctl(semid, semnum, cmd, NULL));
4119              break;
4120      }
4121  
4122      return ret;
4123  }
4124  
4125  struct target_sembuf {
4126      unsigned short sem_num;
4127      short sem_op;
4128      short sem_flg;
4129  };
4130  
4131  static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4132                                               abi_ulong target_addr,
4133                                               unsigned nsops)
4134  {
4135      struct target_sembuf *target_sembuf;
4136      int i;
4137  
4138      target_sembuf = lock_user(VERIFY_READ, target_addr,
4139                                nsops*sizeof(struct target_sembuf), 1);
4140      if (!target_sembuf)
4141          return -TARGET_EFAULT;
4142  
4143      for(i=0; i<nsops; i++) {
4144          __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4145          __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4146          __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4147      }
4148  
4149      unlock_user(target_sembuf, target_addr, 0);
4150  
4151      return 0;
4152  }
4153  
4154  #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4155      defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4156  
4157  /*
4158   * This macro is required to handle the s390 variants, which pass the
4159   * arguments in a different order than the default.
4160   */
4161  #ifdef __s390x__
4162  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4163    (__nsops), (__timeout), (__sops)
4164  #else
4165  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4166    (__nsops), 0, (__sops), (__timeout)
4167  #endif
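/*
 * Expanded, the generic variant passes (nsops, 0, sops, timeout) after
 * the semid, matching the usual six-argument sys_ipc() layout, while
 * the s390x variant passes only (nsops, timeout, sops).
 */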
4168  
4169  static inline abi_long do_semtimedop(int semid,
4170                                       abi_long ptr,
4171                                       unsigned nsops,
4172                                       abi_long timeout, bool time64)
4173  {
4174      struct sembuf *sops;
4175      struct timespec ts, *pts = NULL;
4176      abi_long ret;
4177  
4178      if (timeout) {
4179          pts = &ts;
4180          if (time64) {
4181              if (target_to_host_timespec64(pts, timeout)) {
4182                  return -TARGET_EFAULT;
4183              }
4184          } else {
4185              if (target_to_host_timespec(pts, timeout)) {
4186                  return -TARGET_EFAULT;
4187              }
4188          }
4189      }
4190  
4191      if (nsops > TARGET_SEMOPM) {
4192          return -TARGET_E2BIG;
4193      }
4194  
4195      sops = g_new(struct sembuf, nsops);
4196  
4197      if (target_to_host_sembuf(sops, ptr, nsops)) {
4198          g_free(sops);
4199          return -TARGET_EFAULT;
4200      }
4201  
4202      ret = -TARGET_ENOSYS;
4203  #ifdef __NR_semtimedop
4204      ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4205  #endif
4206  #ifdef __NR_ipc
4207      if (ret == -TARGET_ENOSYS) {
4208          ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4209                                   SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4210      }
4211  #endif
4212      g_free(sops);
4213      return ret;
4214  }
4215  #endif
4216  
4217  struct target_msqid_ds
4218  {
4219      struct target_ipc_perm msg_perm;
4220      abi_ulong msg_stime;
4221  #if TARGET_ABI_BITS == 32
4222      abi_ulong __unused1;
4223  #endif
4224      abi_ulong msg_rtime;
4225  #if TARGET_ABI_BITS == 32
4226      abi_ulong __unused2;
4227  #endif
4228      abi_ulong msg_ctime;
4229  #if TARGET_ABI_BITS == 32
4230      abi_ulong __unused3;
4231  #endif
4232      abi_ulong __msg_cbytes;
4233      abi_ulong msg_qnum;
4234      abi_ulong msg_qbytes;
4235      abi_ulong msg_lspid;
4236      abi_ulong msg_lrpid;
4237      abi_ulong __unused4;
4238      abi_ulong __unused5;
4239  };
4240  
4241  static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4242                                                 abi_ulong target_addr)
4243  {
4244      struct target_msqid_ds *target_md;
4245  
4246      if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4247          return -TARGET_EFAULT;
4248      if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4249          return -TARGET_EFAULT;
4250      host_md->msg_stime = tswapal(target_md->msg_stime);
4251      host_md->msg_rtime = tswapal(target_md->msg_rtime);
4252      host_md->msg_ctime = tswapal(target_md->msg_ctime);
4253      host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4254      host_md->msg_qnum = tswapal(target_md->msg_qnum);
4255      host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4256      host_md->msg_lspid = tswapal(target_md->msg_lspid);
4257      host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4258      unlock_user_struct(target_md, target_addr, 0);
4259      return 0;
4260  }
4261  
4262  static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4263                                                 struct msqid_ds *host_md)
4264  {
4265      struct target_msqid_ds *target_md;
4266  
4267      if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4268          return -TARGET_EFAULT;
4269      if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4270          return -TARGET_EFAULT;
4271      target_md->msg_stime = tswapal(host_md->msg_stime);
4272      target_md->msg_rtime = tswapal(host_md->msg_rtime);
4273      target_md->msg_ctime = tswapal(host_md->msg_ctime);
4274      target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4275      target_md->msg_qnum = tswapal(host_md->msg_qnum);
4276      target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4277      target_md->msg_lspid = tswapal(host_md->msg_lspid);
4278      target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4279      unlock_user_struct(target_md, target_addr, 1);
4280      return 0;
4281  }
4282  
4283  struct target_msginfo {
4284      int msgpool;
4285      int msgmap;
4286      int msgmax;
4287      int msgmnb;
4288      int msgmni;
4289      int msgssz;
4290      int msgtql;
4291      unsigned short int msgseg;
4292  };
4293  
4294  static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4295                                                struct msginfo *host_msginfo)
4296  {
4297      struct target_msginfo *target_msginfo;
4298      if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4299          return -TARGET_EFAULT;
4300      __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4301      __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4302      __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4303      __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4304      __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4305      __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4306      __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4307      __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4308      unlock_user_struct(target_msginfo, target_addr, 1);
4309      return 0;
4310  }
4311  
4312  static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4313  {
4314      struct msqid_ds dsarg;
4315      struct msginfo msginfo;
4316      abi_long ret = -TARGET_EINVAL;
4317  
4318      cmd &= 0xff;
4319  
4320      switch (cmd) {
4321      case IPC_STAT:
4322      case IPC_SET:
4323      case MSG_STAT:
4324          if (target_to_host_msqid_ds(&dsarg,ptr))
4325              return -TARGET_EFAULT;
4326          ret = get_errno(msgctl(msgid, cmd, &dsarg));
4327          if (host_to_target_msqid_ds(ptr,&dsarg))
4328              return -TARGET_EFAULT;
4329          break;
4330      case IPC_RMID:
4331          ret = get_errno(msgctl(msgid, cmd, NULL));
4332          break;
4333      case IPC_INFO:
4334      case MSG_INFO:
4335          ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4336          if (host_to_target_msginfo(ptr, &msginfo))
4337              return -TARGET_EFAULT;
4338          break;
4339      }
4340  
4341      return ret;
4342  }
4343  
4344  struct target_msgbuf {
4345      abi_long mtype;
4346      char	mtext[1];
4347  };
4348  
4349  static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4350                                   ssize_t msgsz, int msgflg)
4351  {
4352      struct target_msgbuf *target_mb;
4353      struct msgbuf *host_mb;
4354      abi_long ret = 0;
4355  
4356      if (msgsz < 0) {
4357          return -TARGET_EINVAL;
4358      }
4359  
4360      if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4361          return -TARGET_EFAULT;
4362      host_mb = g_try_malloc(msgsz + sizeof(long));
4363      if (!host_mb) {
4364          unlock_user_struct(target_mb, msgp, 0);
4365          return -TARGET_ENOMEM;
4366      }
4367      host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4368      memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4369      ret = -TARGET_ENOSYS;
4370  #ifdef __NR_msgsnd
4371      ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4372  #endif
4373  #ifdef __NR_ipc
4374      if (ret == -TARGET_ENOSYS) {
4375  #ifdef __s390x__
4376          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4377                                   host_mb));
4378  #else
4379          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4380                                   host_mb, 0));
4381  #endif
4382      }
4383  #endif
4384      g_free(host_mb);
4385      unlock_user_struct(target_mb, msgp, 0);
4386  
4387      return ret;
4388  }
4389  
4390  #ifdef __NR_ipc
4391  #if defined(__sparc__)
4392  /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4393  #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4394  #elif defined(__s390x__)
4395  /* The s390 sys_ipc variant has only five parameters.  */
4396  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4397      ((long int[]){(long int)__msgp, __msgtyp})
4398  #else
4399  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4400      ((long int[]){(long int)__msgp, __msgtyp}), 0
4401  #endif
4402  #endif
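/*
 * Expanded, the generic variant passes a two-element long array
 * { msgp, msgtyp } plus a trailing 0, SPARC passes msgp and msgtyp
 * directly, and s390x passes only the array (its sys_ipc takes five
 * parameters).
 */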
4403  
4404  static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4405                                   ssize_t msgsz, abi_long msgtyp,
4406                                   int msgflg)
4407  {
4408      struct target_msgbuf *target_mb;
4409      char *target_mtext;
4410      struct msgbuf *host_mb;
4411      abi_long ret = 0;
4412  
4413      if (msgsz < 0) {
4414          return -TARGET_EINVAL;
4415      }
4416  
4417      if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4418          return -TARGET_EFAULT;
4419  
4420      host_mb = g_try_malloc(msgsz + sizeof(long));
4421      if (!host_mb) {
4422          ret = -TARGET_ENOMEM;
4423          goto end;
4424      }
4425      ret = -TARGET_ENOSYS;
4426  #ifdef __NR_msgrcv
4427      ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4428  #endif
4429  #ifdef __NR_ipc
4430      if (ret == -TARGET_ENOSYS) {
4431          ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4432                          msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4433      }
4434  #endif
4435  
4436      if (ret > 0) {
4437          abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4438          target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4439          if (!target_mtext) {
4440              ret = -TARGET_EFAULT;
4441              goto end;
4442          }
4443          memcpy(target_mb->mtext, host_mb->mtext, ret);
4444          unlock_user(target_mtext, target_mtext_addr, ret);
4445      }
4446  
4447      target_mb->mtype = tswapal(host_mb->mtype);
4448  
4449  end:
4450      if (target_mb)
4451          unlock_user_struct(target_mb, msgp, 1);
4452      g_free(host_mb);
4453      return ret;
4454  }
4455  
4456  static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4457                                                 abi_ulong target_addr)
4458  {
4459      struct target_shmid_ds *target_sd;
4460  
4461      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4462          return -TARGET_EFAULT;
4463      if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4464          return -TARGET_EFAULT;
4465      __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4466      __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4467      __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4468      __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4469      __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4470      __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4471      __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4472      unlock_user_struct(target_sd, target_addr, 0);
4473      return 0;
4474  }
4475  
4476  static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4477                                                 struct shmid_ds *host_sd)
4478  {
4479      struct target_shmid_ds *target_sd;
4480  
4481      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4482          return -TARGET_EFAULT;
4483      if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4484          return -TARGET_EFAULT;
4485      __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4486      __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4487      __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4488      __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4489      __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4490      __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4491      __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4492      unlock_user_struct(target_sd, target_addr, 1);
4493      return 0;
4494  }
4495  
4496  struct target_shminfo {
4497      abi_ulong shmmax;
4498      abi_ulong shmmin;
4499      abi_ulong shmmni;
4500      abi_ulong shmseg;
4501      abi_ulong shmall;
4502  };
4503  
4504  static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4505                                                struct shminfo *host_shminfo)
4506  {
4507      struct target_shminfo *target_shminfo;
4508      if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4509          return -TARGET_EFAULT;
4510      __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4511      __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4512      __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4513      __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4514      __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4515      unlock_user_struct(target_shminfo, target_addr, 1);
4516      return 0;
4517  }
4518  
4519  struct target_shm_info {
4520      int used_ids;
4521      abi_ulong shm_tot;
4522      abi_ulong shm_rss;
4523      abi_ulong shm_swp;
4524      abi_ulong swap_attempts;
4525      abi_ulong swap_successes;
4526  };
4527  
4528  static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4529                                                 struct shm_info *host_shm_info)
4530  {
4531      struct target_shm_info *target_shm_info;
4532      if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4533          return -TARGET_EFAULT;
4534      __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4535      __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4536      __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4537      __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4538      __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4539      __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4540      unlock_user_struct(target_shm_info, target_addr, 1);
4541      return 0;
4542  }
4543  
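      /*
       * Emulate shmctl(2).  The command is masked to its low byte (dropping
       * the IPC_64 flag) and the buf argument is converted according to the
       * command: shmid_ds for IPC_STAT/IPC_SET/SHM_STAT, shminfo for
       * IPC_INFO, shm_info for SHM_INFO, and no buffer at all for
       * IPC_RMID/SHM_LOCK/SHM_UNLOCK.  Unknown commands fail with
       * -TARGET_EINVAL.
       */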
4544  static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4545  {
4546      struct shmid_ds dsarg;
4547      struct shminfo shminfo;
4548      struct shm_info shm_info;
4549      abi_long ret = -TARGET_EINVAL;
4550  
4551      cmd &= 0xff;
4552  
4553      switch (cmd) {
4554      case IPC_STAT:
4555      case IPC_SET:
4556      case SHM_STAT:
4557          if (target_to_host_shmid_ds(&dsarg, buf))
4558              return -TARGET_EFAULT;
4559          ret = get_errno(shmctl(shmid, cmd, &dsarg));
4560          if (host_to_target_shmid_ds(buf, &dsarg))
4561              return -TARGET_EFAULT;
4562          break;
4563      case IPC_INFO:
4564          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4565          if (host_to_target_shminfo(buf, &shminfo))
4566              return -TARGET_EFAULT;
4567          break;
4568      case SHM_INFO:
4569          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4570          if (host_to_target_shm_info(buf, &shm_info))
4571              return -TARGET_EFAULT;
4572          break;
4573      case IPC_RMID:
4574      case SHM_LOCK:
4575      case SHM_UNLOCK:
4576          ret = get_errno(shmctl(shmid, cmd, NULL));
4577          break;
4578      }
4579  
4580      return ret;
4581  }
4582  
4583  #ifndef TARGET_FORCE_SHMLBA
4584  /* For most architectures, SHMLBA is the same as the page size;
4585   * some architectures have larger values, in which case they should
4586   * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4587   * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4588   * and defining its own value for SHMLBA.
4589   *
4590   * The kernel also permits SHMLBA to be set by the architecture to a
4591   * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4592   * this means that addresses are rounded to the large size if
4593   * SHM_RND is set but addresses not aligned to that size are not rejected
4594   * as long as they are at least page-aligned. Since the only architecture
4595   * which uses this is ia64 this code doesn't provide for that oddity.
4596   */
4597  static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4598  {
4599      return TARGET_PAGE_SIZE;
4600  }
4601  #endif
4602  
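      /*
       * Emulate shmat(2).  The requested guest address is checked against
       * the target SHMLBA (rounding down when SHM_RND is set), the segment
       * is attached on the host -- using mmap_find_vma() to pick a spot
       * when the guest passed a NULL address -- and the resulting mapping
       * is recorded in shm_regions[] so that a later shmdt() can clear the
       * page flags for the correct length.
       */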
4603  static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4604                                   int shmid, abi_ulong shmaddr, int shmflg)
4605  {
4606      CPUState *cpu = env_cpu(cpu_env);
4607      abi_long raddr;
4608      void *host_raddr;
4609      struct shmid_ds shm_info;
4610      int i, ret;
4611      abi_ulong shmlba;
4612  
4613      /* shmat pointers are always untagged */
4614  
4615      /* find out the length of the shared memory segment */
4616      ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4617      if (is_error(ret)) {
4618          /* can't get length, bail out */
4619          return ret;
4620      }
4621  
4622      shmlba = target_shmlba(cpu_env);
4623  
4624      if (shmaddr & (shmlba - 1)) {
4625          if (shmflg & SHM_RND) {
4626              shmaddr &= ~(shmlba - 1);
4627          } else {
4628              return -TARGET_EINVAL;
4629          }
4630      }
4631      if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4632          return -TARGET_EINVAL;
4633      }
4634  
4635      mmap_lock();
4636  
4637      /*
4638       * We're mapping shared memory, so ensure we generate code for parallel
4639       * execution and flush old translations.  This will work up to the level
4640       * supported by the host -- anything that requires EXCP_ATOMIC will not
4641       * be atomic with respect to an external process.
4642       */
4643      if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4644          cpu->tcg_cflags |= CF_PARALLEL;
4645          tb_flush(cpu);
4646      }
4647  
4648      if (shmaddr)
4649          host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4650      else {
4651          abi_ulong mmap_start;
4652  
4653          /* In order to use the host shmat, we need to honor host SHMLBA.  */
4654          mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4655  
4656          if (mmap_start == -1) {
4657              errno = ENOMEM;
4658              host_raddr = (void *)-1;
4659          } else
4660              host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4661                                 shmflg | SHM_REMAP);
4662      }
4663  
4664      if (host_raddr == (void *)-1) {
4665          mmap_unlock();
4666          return get_errno((long)host_raddr);
4667      }
4668      raddr = h2g((unsigned long)host_raddr);
4669  
4670      page_set_flags(raddr, raddr + shm_info.shm_segsz,
4671                     PAGE_VALID | PAGE_RESET | PAGE_READ |
4672                     (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4673  
4674      for (i = 0; i < N_SHM_REGIONS; i++) {
4675          if (!shm_regions[i].in_use) {
4676              shm_regions[i].in_use = true;
4677              shm_regions[i].start = raddr;
4678              shm_regions[i].size = shm_info.shm_segsz;
4679              break;
4680          }
4681      }
4682  
4683      mmap_unlock();
4684      return raddr;
4686  }
4687  
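      /*
       * Emulate shmdt(2): drop the shm_regions[] entry for this address and
       * clear its page flags before detaching the segment on the host.
       */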
4688  static inline abi_long do_shmdt(abi_ulong shmaddr)
4689  {
4690      int i;
4691      abi_long rv;
4692  
4693      /* shmdt pointers are always untagged */
4694  
4695      mmap_lock();
4696  
4697      for (i = 0; i < N_SHM_REGIONS; ++i) {
4698          if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4699              shm_regions[i].in_use = false;
4700              page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4701              break;
4702          }
4703      }
4704      rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4705  
4706      mmap_unlock();
4707  
4708      return rv;
4709  }
4710  
4711  #ifdef TARGET_NR_ipc
4712  /* ??? This only works with linear mappings.  */
4713  /* do_ipc() must return target values and target errnos. */
4714  static abi_long do_ipc(CPUArchState *cpu_env,
4715                         unsigned int call, abi_long first,
4716                         abi_long second, abi_long third,
4717                         abi_long ptr, abi_long fifth)
4718  {
4719      int version;
4720      abi_long ret = 0;
4721  
4722      version = call >> 16;
4723      call &= 0xffff;
4724  
4725      switch (call) {
4726      case IPCOP_semop:
4727          ret = do_semtimedop(first, ptr, second, 0, false);
4728          break;
4729      case IPCOP_semtimedop:
4730      /*
4731       * The s390 sys_ipc variant has only five parameters instead of six
4732       * (as for default variant) and the only difference is the handling of
4733       * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4734       * to a struct timespec where the generic variant uses fifth parameter.
4735       */
4736  #if defined(TARGET_S390X)
4737          ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4738  #else
4739          ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4740  #endif
4741          break;
4742  
4743      case IPCOP_semget:
4744          ret = get_errno(semget(first, second, third));
4745          break;
4746  
4747      case IPCOP_semctl: {
4748          /* The semun argument to semctl is passed by value, so dereference the
4749           * ptr argument. */
4750          abi_ulong atptr;
4751          get_user_ual(atptr, ptr);
4752          ret = do_semctl(first, second, third, atptr);
4753          break;
4754      }
4755  
4756      case IPCOP_msgget:
4757          ret = get_errno(msgget(first, second));
4758          break;
4759  
4760      case IPCOP_msgsnd:
4761          ret = do_msgsnd(first, ptr, second, third);
4762          break;
4763  
4764      case IPCOP_msgctl:
4765          ret = do_msgctl(first, second, ptr);
4766          break;
4767  
4768      case IPCOP_msgrcv:
4769          switch (version) {
4770          case 0:
4771              {
4772                  struct target_ipc_kludge {
4773                      abi_long msgp;
4774                      abi_long msgtyp;
4775                  } *tmp;
4776  
4777                  if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4778                      ret = -TARGET_EFAULT;
4779                      break;
4780                  }
4781  
4782                  ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4783  
4784                  unlock_user_struct(tmp, ptr, 0);
4785                  break;
4786              }
4787          default:
4788              ret = do_msgrcv(first, ptr, second, fifth, third);
4789          }
4790          break;
4791  
4792      case IPCOP_shmat:
4793          switch (version) {
4794          default:
4795          {
4796              abi_ulong raddr;
4797              raddr = do_shmat(cpu_env, first, ptr, second);
4798              if (is_error(raddr))
4799                  return get_errno(raddr);
4800              if (put_user_ual(raddr, third))
4801                  return -TARGET_EFAULT;
4802              break;
4803          }
4804          case 1:
4805              ret = -TARGET_EINVAL;
4806              break;
4807          }
4808          break;
4809      case IPCOP_shmdt:
4810          ret = do_shmdt(ptr);
4811          break;
4812  
4813      case IPCOP_shmget:
4814          /* IPC_* flag values are the same on all linux platforms */
4815          ret = get_errno(shmget(first, second, third));
4816          break;
4817  
4818      /* IPC_* and SHM_* command values are the same on all linux platforms */
4819      case IPCOP_shmctl:
4820          ret = do_shmctl(first, second, ptr);
4821          break;
4822      default:
4823          qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4824                        call, version);
4825          ret = -TARGET_ENOSYS;
4826          break;
4827      }
4828      return ret;
4829  }
4830  #endif
4831  
4832  /* kernel structure types definitions */
4833  
4834  #define STRUCT(name, ...) STRUCT_ ## name,
4835  #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4836  enum {
4837  #include "syscall_types.h"
4838  STRUCT_MAX
4839  };
4840  #undef STRUCT
4841  #undef STRUCT_SPECIAL
4842  
4843  #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4844  #define STRUCT_SPECIAL(name)
4845  #include "syscall_types.h"
4846  #undef STRUCT
4847  #undef STRUCT_SPECIAL
4848  
4849  #define MAX_STRUCT_SIZE 4096
4850  
4851  #ifdef CONFIG_FIEMAP
4852  /* So fiemap access checks don't overflow on 32 bit systems.
4853   * This is very slightly smaller than the limit imposed by
4854   * the underlying kernel.
4855   */
4856  #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4857                              / sizeof(struct fiemap_extent))
4858  
4859  static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4860                                         int fd, int cmd, abi_long arg)
4861  {
4862      /* The parameter for this ioctl is a struct fiemap followed
4863       * by an array of struct fiemap_extent whose size is set
4864       * in fiemap->fm_extent_count. The array is filled in by the
4865       * ioctl.
4866       */
4867      int target_size_in, target_size_out;
4868      struct fiemap *fm;
4869      const argtype *arg_type = ie->arg_type;
4870      const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4871      void *argptr, *p;
4872      abi_long ret;
4873      int i, extent_size = thunk_type_size(extent_arg_type, 0);
4874      uint32_t outbufsz;
4875      int free_fm = 0;
4876  
4877      assert(arg_type[0] == TYPE_PTR);
4878      assert(ie->access == IOC_RW);
4879      arg_type++;
4880      target_size_in = thunk_type_size(arg_type, 0);
4881      argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4882      if (!argptr) {
4883          return -TARGET_EFAULT;
4884      }
4885      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4886      unlock_user(argptr, arg, 0);
4887      fm = (struct fiemap *)buf_temp;
4888      if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4889          return -TARGET_EINVAL;
4890      }
4891  
4892      outbufsz = sizeof (*fm) +
4893          (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4894  
4895      if (outbufsz > MAX_STRUCT_SIZE) {
4896          /* We can't fit all the extents into the fixed size buffer.
4897           * Allocate one that is large enough and use it instead.
4898           */
4899          fm = g_try_malloc(outbufsz);
4900          if (!fm) {
4901              return -TARGET_ENOMEM;
4902          }
4903          memcpy(fm, buf_temp, sizeof(struct fiemap));
4904          free_fm = 1;
4905      }
4906      ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4907      if (!is_error(ret)) {
4908          target_size_out = target_size_in;
4909          /* An extent_count of 0 means we were only counting the extents
4910           * so there are no structs to copy
4911           */
4912          if (fm->fm_extent_count != 0) {
4913              target_size_out += fm->fm_mapped_extents * extent_size;
4914          }
4915          argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4916          if (!argptr) {
4917              ret = -TARGET_EFAULT;
4918          } else {
4919              /* Convert the struct fiemap */
4920              thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4921              if (fm->fm_extent_count != 0) {
4922                  p = argptr + target_size_in;
4923                  /* ...and then all the struct fiemap_extents */
4924                  for (i = 0; i < fm->fm_mapped_extents; i++) {
4925                      thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4926                                    THUNK_TARGET);
4927                      p += extent_size;
4928                  }
4929              }
4930              unlock_user(argptr, arg, target_size_out);
4931          }
4932      }
4933      if (free_fm) {
4934          g_free(fm);
4935      }
4936      return ret;
4937  }
4938  #endif
4939  
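      /*
       * SIOCGIFCONF: struct ifconf carries a pointer to an array of
       * struct ifreq, and the target and host ifreq sizes may differ.
       * The entry count is derived from the target length, the ioctl is
       * run on a host-sized array, and ifc_len plus the entries are
       * converted back to target format on success.  A NULL ifc_buf
       * (the "query required length" form) is passed through unchanged.
       */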
4940  static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4941                                  int fd, int cmd, abi_long arg)
4942  {
4943      const argtype *arg_type = ie->arg_type;
4944      int target_size;
4945      void *argptr;
4946      int ret;
4947      struct ifconf *host_ifconf;
4948      uint32_t outbufsz;
4949      const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4950      const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4951      int target_ifreq_size;
4952      int nb_ifreq;
4953      int free_buf = 0;
4954      int i;
4955      int target_ifc_len;
4956      abi_long target_ifc_buf;
4957      int host_ifc_len;
4958      char *host_ifc_buf;
4959  
4960      assert(arg_type[0] == TYPE_PTR);
4961      assert(ie->access == IOC_RW);
4962  
4963      arg_type++;
4964      target_size = thunk_type_size(arg_type, 0);
4965  
4966      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4967      if (!argptr)
4968          return -TARGET_EFAULT;
4969      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4970      unlock_user(argptr, arg, 0);
4971  
4972      host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4973      target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4974      target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4975  
4976      if (target_ifc_buf != 0) {
4977          target_ifc_len = host_ifconf->ifc_len;
4978          nb_ifreq = target_ifc_len / target_ifreq_size;
4979          host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4980  
4981          outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4982          if (outbufsz > MAX_STRUCT_SIZE) {
4983              /*
4984               * We can't fit all the ifreq entries into the fixed size buffer.
4985               * Allocate one that is large enough and use it instead.
4986               */
4987              host_ifconf = g_try_malloc(outbufsz);
4988              if (!host_ifconf) {
4989                  return -TARGET_ENOMEM;
4990              }
4991              memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4992              free_buf = 1;
4993          }
4994          host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4995  
4996          host_ifconf->ifc_len = host_ifc_len;
4997      } else {
4998          host_ifc_buf = NULL;
4999      }
5000      host_ifconf->ifc_buf = host_ifc_buf;
5001  
5002      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5003      if (!is_error(ret)) {
5004          /* convert host ifc_len to target ifc_len */
5005  
5006          nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5007          target_ifc_len = nb_ifreq * target_ifreq_size;
5008          host_ifconf->ifc_len = target_ifc_len;
5009  
5010          /* restore target ifc_buf */
5011  
5012          host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5013  
5014          /* copy struct ifconf to target user */
5015  
5016          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5017          if (!argptr)
5018              return -TARGET_EFAULT;
5019          thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5020          unlock_user(argptr, arg, target_size);
5021  
5022          if (target_ifc_buf != 0) {
5023              /* copy ifreq[] to target user */
5024              argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5025              for (i = 0; i < nb_ifreq ; i++) {
5026                  thunk_convert(argptr + i * target_ifreq_size,
5027                                host_ifc_buf + i * sizeof(struct ifreq),
5028                                ifreq_arg_type, THUNK_TARGET);
5029              }
5030              unlock_user(argptr, target_ifc_buf, target_ifc_len);
5031          }
5032      }
5033  
5034      if (free_buf) {
5035          g_free(host_ifconf);
5036      }
5037  
5038      return ret;
5039  }
5040  
5041  #if defined(CONFIG_USBFS)
5042  #if HOST_LONG_BITS > 64
5043  #error USBDEVFS thunks do not support >64 bit hosts yet.
5044  #endif
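      /*
       * For the usbdevfs URB ioctls each guest URB is wrapped in a
       * live_urb that pairs the host usbdevfs_urb with the guest URB and
       * buffer addresses needed to write results back.  Wrappers are kept
       * in a hash table keyed by the guest URB address so that
       * USBDEVFS_DISCARDURB can translate a guest URB address back to its
       * host URB.
       */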
5045  struct live_urb {
5046      uint64_t target_urb_adr;
5047      uint64_t target_buf_adr;
5048      char *target_buf_ptr;
5049      struct usbdevfs_urb host_urb;
5050  };
5051  
5052  static GHashTable *usbdevfs_urb_hashtable(void)
5053  {
5054      static GHashTable *urb_hashtable;
5055  
5056      if (!urb_hashtable) {
5057          urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5058      }
5059      return urb_hashtable;
5060  }
5061  
5062  static void urb_hashtable_insert(struct live_urb *urb)
5063  {
5064      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5065      g_hash_table_insert(urb_hashtable, urb, urb);
5066  }
5067  
5068  static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5069  {
5070      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5071      return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5072  }
5073  
5074  static void urb_hashtable_remove(struct live_urb *urb)
5075  {
5076      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5077      g_hash_table_remove(urb_hashtable, urb);
5078  }
5079  
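      /*
       * USBDEVFS_REAPURB returns the host URB pointer; recover the owning
       * live_urb from it, release the guest data buffer, copy the reaped
       * URB back to the guest and report the guest URB address as the
       * result instead of the host pointer.
       */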
5080  static abi_long
5081  do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5082                            int fd, int cmd, abi_long arg)
5083  {
5084      const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5085      const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5086      struct live_urb *lurb;
5087      void *argptr;
5088      uint64_t hurb;
5089      int target_size;
5090      uintptr_t target_urb_adr;
5091      abi_long ret;
5092  
5093      target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5094  
5095      memset(buf_temp, 0, sizeof(uint64_t));
5096      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5097      if (is_error(ret)) {
5098          return ret;
5099      }
5100  
5101      memcpy(&hurb, buf_temp, sizeof(uint64_t));
5102      lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5103      if (!lurb->target_urb_adr) {
5104          return -TARGET_EFAULT;
5105      }
5106      urb_hashtable_remove(lurb);
5107      unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5108          lurb->host_urb.buffer_length);
5109      lurb->target_buf_ptr = NULL;
5110  
5111      /* restore the guest buffer pointer */
5112      lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5113  
5114      /* update the guest urb struct */
5115      argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5116      if (!argptr) {
5117          g_free(lurb);
5118          return -TARGET_EFAULT;
5119      }
5120      thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5121      unlock_user(argptr, lurb->target_urb_adr, target_size);
5122  
5123      target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5124      /* write back the urb handle */
5125      argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5126      if (!argptr) {
5127          g_free(lurb);
5128          return -TARGET_EFAULT;
5129      }
5130  
5131      /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5132      target_urb_adr = lurb->target_urb_adr;
5133      thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5134      unlock_user(argptr, arg, target_size);
5135  
5136      g_free(lurb);
5137      return ret;
5138  }
5139  
5140  static abi_long
5141  do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5142                               uint8_t *buf_temp __attribute__((unused)),
5143                               int fd, int cmd, abi_long arg)
5144  {
5145      struct live_urb *lurb;
5146  
5147      /* map target address back to host URB with metadata. */
5148      lurb = urb_hashtable_lookup(arg);
5149      if (!lurb) {
5150          return -TARGET_EFAULT;
5151      }
5152      return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5153  }
5154  
5155  static abi_long
5156  do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5157                              int fd, int cmd, abi_long arg)
5158  {
5159      const argtype *arg_type = ie->arg_type;
5160      int target_size;
5161      abi_long ret;
5162      void *argptr;
5163      int rw_dir;
5164      struct live_urb *lurb;
5165  
5166      /*
5167       * Each submitted URB needs to map to a unique ID for the
5168       * kernel, and that unique ID needs to be a pointer to
5169       * host memory.  Hence, we need to malloc for each URB.
5170       * Isochronous transfers have a variable-length struct.
5171       */
5172      arg_type++;
5173      target_size = thunk_type_size(arg_type, THUNK_TARGET);
5174  
5175      /* construct host copy of urb and metadata */
5176      lurb = g_try_malloc0(sizeof(struct live_urb));
5177      if (!lurb) {
5178          return -TARGET_ENOMEM;
5179      }
5180  
5181      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5182      if (!argptr) {
5183          g_free(lurb);
5184          return -TARGET_EFAULT;
5185      }
5186      thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5187      unlock_user(argptr, arg, 0);
5188  
5189      lurb->target_urb_adr = arg;
5190      lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5191  
5192      /* buffer space used depends on endpoint type so lock the entire buffer */
5193      /* control type urbs should check the buffer contents for true direction */
5194      rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5195      lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5196          lurb->host_urb.buffer_length, 1);
5197      if (lurb->target_buf_ptr == NULL) {
5198          g_free(lurb);
5199          return -TARGET_EFAULT;
5200      }
5201  
5202      /* update buffer pointer in host copy */
5203      lurb->host_urb.buffer = lurb->target_buf_ptr;
5204  
5205      ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5206      if (is_error(ret)) {
5207          unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5208          g_free(lurb);
5209      } else {
5210          urb_hashtable_insert(lurb);
5211      }
5212  
5213      return ret;
5214  }
5215  #endif /* CONFIG_USBFS */
5216  
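      /*
       * Device-mapper ioctls pass a struct dm_ioctl followed by a variable
       * amount of command-specific payload in the same user buffer.  That
       * can exceed the fixed buf_temp, so the request is staged in a
       * temporary allocation and the payload converted per command in both
       * directions before the result is copied back to the guest.
       */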
5217  static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5218                              int cmd, abi_long arg)
5219  {
5220      void *argptr;
5221      struct dm_ioctl *host_dm;
5222      abi_long guest_data;
5223      uint32_t guest_data_size;
5224      int target_size;
5225      const argtype *arg_type = ie->arg_type;
5226      abi_long ret;
5227      void *big_buf = NULL;
5228      char *host_data;
5229  
5230      arg_type++;
5231      target_size = thunk_type_size(arg_type, 0);
5232      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5233      if (!argptr) {
5234          ret = -TARGET_EFAULT;
5235          goto out;
5236      }
5237      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5238      unlock_user(argptr, arg, 0);
5239  
5240      /* buf_temp is too small, so fetch things into a bigger buffer */
5241      big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5242      memcpy(big_buf, buf_temp, target_size);
5243      buf_temp = big_buf;
5244      host_dm = big_buf;
5245  
5246      guest_data = arg + host_dm->data_start;
5247      if ((guest_data - arg) < 0) {
5248          ret = -TARGET_EINVAL;
5249          goto out;
5250      }
5251      guest_data_size = host_dm->data_size - host_dm->data_start;
5252      host_data = (char*)host_dm + host_dm->data_start;
5253  
5254      argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5255      if (!argptr) {
5256          ret = -TARGET_EFAULT;
5257          goto out;
5258      }
5259  
5260      switch (ie->host_cmd) {
5261      case DM_REMOVE_ALL:
5262      case DM_LIST_DEVICES:
5263      case DM_DEV_CREATE:
5264      case DM_DEV_REMOVE:
5265      case DM_DEV_SUSPEND:
5266      case DM_DEV_STATUS:
5267      case DM_DEV_WAIT:
5268      case DM_TABLE_STATUS:
5269      case DM_TABLE_CLEAR:
5270      case DM_TABLE_DEPS:
5271      case DM_LIST_VERSIONS:
5272          /* no input data */
5273          break;
5274      case DM_DEV_RENAME:
5275      case DM_DEV_SET_GEOMETRY:
5276          /* data contains only strings */
5277          memcpy(host_data, argptr, guest_data_size);
5278          break;
5279      case DM_TARGET_MSG:
5280          memcpy(host_data, argptr, guest_data_size);
5281          *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5282          break;
5283      case DM_TABLE_LOAD:
5284      {
5285          void *gspec = argptr;
5286          void *cur_data = host_data;
5287          const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5288          int spec_size = thunk_type_size(arg_type, 0);
5289          int i;
5290  
5291          for (i = 0; i < host_dm->target_count; i++) {
5292              struct dm_target_spec *spec = cur_data;
5293              uint32_t next;
5294              int slen;
5295  
5296              thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5297              slen = strlen((char*)gspec + spec_size) + 1;
5298              next = spec->next;
5299              spec->next = sizeof(*spec) + slen;
5300              strcpy((char*)&spec[1], gspec + spec_size);
5301              gspec += next;
5302              cur_data += spec->next;
5303          }
5304          break;
5305      }
5306      default:
5307          ret = -TARGET_EINVAL;
5308          unlock_user(argptr, guest_data, 0);
5309          goto out;
5310      }
5311      unlock_user(argptr, guest_data, 0);
5312  
5313      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5314      if (!is_error(ret)) {
5315          guest_data = arg + host_dm->data_start;
5316          guest_data_size = host_dm->data_size - host_dm->data_start;
5317          argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5318          switch (ie->host_cmd) {
5319          case DM_REMOVE_ALL:
5320          case DM_DEV_CREATE:
5321          case DM_DEV_REMOVE:
5322          case DM_DEV_RENAME:
5323          case DM_DEV_SUSPEND:
5324          case DM_DEV_STATUS:
5325          case DM_TABLE_LOAD:
5326          case DM_TABLE_CLEAR:
5327          case DM_TARGET_MSG:
5328          case DM_DEV_SET_GEOMETRY:
5329              /* no return data */
5330              break;
5331          case DM_LIST_DEVICES:
5332          {
5333              struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5334              uint32_t remaining_data = guest_data_size;
5335              void *cur_data = argptr;
5336              const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5337              int nl_size = 12; /* can't use thunk_size due to alignment */
5338  
5339              while (1) {
5340                  uint32_t next = nl->next;
5341                  if (next) {
5342                      nl->next = nl_size + (strlen(nl->name) + 1);
5343                  }
5344                  if (remaining_data < nl->next) {
5345                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5346                      break;
5347                  }
5348                  thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5349                  strcpy(cur_data + nl_size, nl->name);
5350                  cur_data += nl->next;
5351                  remaining_data -= nl->next;
5352                  if (!next) {
5353                      break;
5354                  }
5355                  nl = (void*)nl + next;
5356              }
5357              break;
5358          }
5359          case DM_DEV_WAIT:
5360          case DM_TABLE_STATUS:
5361          {
5362              struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5363              void *cur_data = argptr;
5364              const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5365              int spec_size = thunk_type_size(arg_type, 0);
5366              int i;
5367  
5368              for (i = 0; i < host_dm->target_count; i++) {
5369                  uint32_t next = spec->next;
5370                  int slen = strlen((char*)&spec[1]) + 1;
5371                  spec->next = (cur_data - argptr) + spec_size + slen;
5372                  if (guest_data_size < spec->next) {
5373                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5374                      break;
5375                  }
5376                  thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5377                  strcpy(cur_data + spec_size, (char*)&spec[1]);
5378                  cur_data = argptr + spec->next;
5379                  spec = (void*)host_dm + host_dm->data_start + next;
5380              }
5381              break;
5382          }
5383          case DM_TABLE_DEPS:
5384          {
5385              void *hdata = (void*)host_dm + host_dm->data_start;
5386              int count = *(uint32_t*)hdata;
5387              uint64_t *hdev = hdata + 8;
5388              uint64_t *gdev = argptr + 8;
5389              int i;
5390  
5391              *(uint32_t*)argptr = tswap32(count);
5392              for (i = 0; i < count; i++) {
5393                  *gdev = tswap64(*hdev);
5394                  gdev++;
5395                  hdev++;
5396              }
5397              break;
5398          }
5399          case DM_LIST_VERSIONS:
5400          {
5401              struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5402              uint32_t remaining_data = guest_data_size;
5403              void *cur_data = argptr;
5404              const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5405              int vers_size = thunk_type_size(arg_type, 0);
5406  
5407              while (1) {
5408                  uint32_t next = vers->next;
5409                  if (next) {
5410                      vers->next = vers_size + (strlen(vers->name) + 1);
5411                  }
5412                  if (remaining_data < vers->next) {
5413                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5414                      break;
5415                  }
5416                  thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5417                  strcpy(cur_data + vers_size, vers->name);
5418                  cur_data += vers->next;
5419                  remaining_data -= vers->next;
5420                  if (!next) {
5421                      break;
5422                  }
5423                  vers = (void*)vers + next;
5424              }
5425              break;
5426          }
5427          default:
5428              unlock_user(argptr, guest_data, 0);
5429              ret = -TARGET_EINVAL;
5430              goto out;
5431          }
5432          unlock_user(argptr, guest_data, guest_data_size);
5433  
5434          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5435          if (!argptr) {
5436              ret = -TARGET_EFAULT;
5437              goto out;
5438          }
5439          thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5440          unlock_user(argptr, arg, target_size);
5441      }
5442  out:
5443      g_free(big_buf);
5444      return ret;
5445  }
5446  
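      /*
       * BLKPG: struct blkpg_ioctl_arg contains a data pointer to a
       * struct blkpg_partition, so both levels are converted and the host
       * ioctl is issued with the pointer redirected to a local copy.  Only
       * BLKPG_ADD_PARTITION and BLKPG_DEL_PARTITION are supported.
       */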
5447  static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5448                                 int cmd, abi_long arg)
5449  {
5450      void *argptr;
5451      int target_size;
5452      const argtype *arg_type = ie->arg_type;
5453      const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5454      abi_long ret;
5455  
5456      struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5457      struct blkpg_partition host_part;
5458  
5459      /* Read and convert blkpg */
5460      arg_type++;
5461      target_size = thunk_type_size(arg_type, 0);
5462      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5463      if (!argptr) {
5464          ret = -TARGET_EFAULT;
5465          goto out;
5466      }
5467      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5468      unlock_user(argptr, arg, 0);
5469  
5470      switch (host_blkpg->op) {
5471      case BLKPG_ADD_PARTITION:
5472      case BLKPG_DEL_PARTITION:
5473          /* payload is struct blkpg_partition */
5474          break;
5475      default:
5476          /* Unknown opcode */
5477          ret = -TARGET_EINVAL;
5478          goto out;
5479      }
5480  
5481      /* Read and convert blkpg->data */
5482      arg = (abi_long)(uintptr_t)host_blkpg->data;
5483      target_size = thunk_type_size(part_arg_type, 0);
5484      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5485      if (!argptr) {
5486          ret = -TARGET_EFAULT;
5487          goto out;
5488      }
5489      thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5490      unlock_user(argptr, arg, 0);
5491  
5492      /* Swizzle the data pointer to our local copy and call! */
5493      host_blkpg->data = &host_part;
5494      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5495  
5496  out:
5497      return ret;
5498  }
5499  
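      /*
       * Handler for ioctls that take a struct rtentry.  Its rt_dev field
       * is a pointer to a device name string that the generic thunk code
       * cannot follow, so the struct is converted field by field and the
       * guest string is locked separately so the host sees a valid pointer.
       */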
5500  static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5501                                  int fd, int cmd, abi_long arg)
5502  {
5503      const argtype *arg_type = ie->arg_type;
5504      const StructEntry *se;
5505      const argtype *field_types;
5506      const int *dst_offsets, *src_offsets;
5507      int target_size;
5508      void *argptr;
5509      abi_ulong *target_rt_dev_ptr = NULL;
5510      unsigned long *host_rt_dev_ptr = NULL;
5511      abi_long ret;
5512      int i;
5513  
5514      assert(ie->access == IOC_W);
5515      assert(*arg_type == TYPE_PTR);
5516      arg_type++;
5517      assert(*arg_type == TYPE_STRUCT);
5518      target_size = thunk_type_size(arg_type, 0);
5519      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5520      if (!argptr) {
5521          return -TARGET_EFAULT;
5522      }
5523      arg_type++;
5524      assert(*arg_type == (int)STRUCT_rtentry);
5525      se = struct_entries + *arg_type++;
5526      assert(se->convert[0] == NULL);
5527      /* convert struct here to be able to catch rt_dev string */
5528      field_types = se->field_types;
5529      dst_offsets = se->field_offsets[THUNK_HOST];
5530      src_offsets = se->field_offsets[THUNK_TARGET];
5531      for (i = 0; i < se->nb_fields; i++) {
5532          if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5533              assert(*field_types == TYPE_PTRVOID);
5534              target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5535              host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5536              if (*target_rt_dev_ptr != 0) {
5537                  *host_rt_dev_ptr = (unsigned long)lock_user_string(
5538                                                    tswapal(*target_rt_dev_ptr));
5539                  if (!*host_rt_dev_ptr) {
5540                      unlock_user(argptr, arg, 0);
5541                      return -TARGET_EFAULT;
5542                  }
5543              } else {
5544                  *host_rt_dev_ptr = 0;
5545              }
5546              field_types++;
5547              continue;
5548          }
5549          field_types = thunk_convert(buf_temp + dst_offsets[i],
5550                                      argptr + src_offsets[i],
5551                                      field_types, THUNK_HOST);
5552      }
5553      unlock_user(argptr, arg, 0);
5554  
5555      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5556  
5557      assert(host_rt_dev_ptr != NULL);
5558      assert(target_rt_dev_ptr != NULL);
5559      if (*host_rt_dev_ptr != 0) {
5560          unlock_user((void *)*host_rt_dev_ptr,
5561                      *target_rt_dev_ptr, 0);
5562      }
5563      return ret;
5564  }
5565  
5566  static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5567                                       int fd, int cmd, abi_long arg)
5568  {
5569      int sig = target_to_host_signal(arg);
5570      return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5571  }
5572  
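      /*
       * SIOCGSTAMP and SIOCGSTAMPNS exist in _OLD and _NEW forms: the old
       * commands return the target's traditional timeval/timespec layout,
       * the new ones the 64-bit time layout, so the copy-out helper is
       * chosen by the command value.
       */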
5573  static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5574                                      int fd, int cmd, abi_long arg)
5575  {
5576      struct timeval tv;
5577      abi_long ret;
5578  
5579      ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5580      if (is_error(ret)) {
5581          return ret;
5582      }
5583  
5584      if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5585          if (copy_to_user_timeval(arg, &tv)) {
5586              return -TARGET_EFAULT;
5587          }
5588      } else {
5589          if (copy_to_user_timeval64(arg, &tv)) {
5590              return -TARGET_EFAULT;
5591          }
5592      }
5593  
5594      return ret;
5595  }
5596  
5597  static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5598                                        int fd, int cmd, abi_long arg)
5599  {
5600      struct timespec ts;
5601      abi_long ret;
5602  
5603      ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5604      if (is_error(ret)) {
5605          return ret;
5606      }
5607  
5608      if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5609          if (host_to_target_timespec(arg, &ts)) {
5610              return -TARGET_EFAULT;
5611          }
5612      } else {
5613          if (host_to_target_timespec64(arg, &ts)) {
5614              return -TARGET_EFAULT;
5615          }
5616      }
5617  
5618      return ret;
5619  }
5620  
5621  #ifdef TIOCGPTPEER
5622  static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5623                                       int fd, int cmd, abi_long arg)
5624  {
5625      int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5626      return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5627  }
5628  #endif
5629  
5630  #ifdef HAVE_DRM_H
5631  
5632  static void unlock_drm_version(struct drm_version *host_ver,
5633                                 struct target_drm_version *target_ver,
5634                                 bool copy)
5635  {
5636      unlock_user(host_ver->name, target_ver->name,
5637                                  copy ? host_ver->name_len : 0);
5638      unlock_user(host_ver->date, target_ver->date,
5639                                  copy ? host_ver->date_len : 0);
5640      unlock_user(host_ver->desc, target_ver->desc,
5641                                  copy ? host_ver->desc_len : 0);
5642  }
5643  
5644  static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5645                                            struct target_drm_version *target_ver)
5646  {
5647      memset(host_ver, 0, sizeof(*host_ver));
5648  
5649      __get_user(host_ver->name_len, &target_ver->name_len);
5650      if (host_ver->name_len) {
5651          host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5652                                     target_ver->name_len, 0);
5653          if (!host_ver->name) {
5654              return -EFAULT;
5655          }
5656      }
5657  
5658      __get_user(host_ver->date_len, &target_ver->date_len);
5659      if (host_ver->date_len) {
5660          host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5661                                     target_ver->date_len, 0);
5662          if (!host_ver->date) {
5663              goto err;
5664          }
5665      }
5666  
5667      __get_user(host_ver->desc_len, &target_ver->desc_len);
5668      if (host_ver->desc_len) {
5669          host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5670                                     target_ver->desc_len, 0);
5671          if (!host_ver->desc) {
5672              goto err;
5673          }
5674      }
5675  
5676      return 0;
5677  err:
5678      unlock_drm_version(host_ver, target_ver, false);
5679      return -EFAULT;
5680  }
5681  
5682  static inline void host_to_target_drmversion(
5683                                            struct target_drm_version *target_ver,
5684                                            struct drm_version *host_ver)
5685  {
5686      __put_user(host_ver->version_major, &target_ver->version_major);
5687      __put_user(host_ver->version_minor, &target_ver->version_minor);
5688      __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5689      __put_user(host_ver->name_len, &target_ver->name_len);
5690      __put_user(host_ver->date_len, &target_ver->date_len);
5691      __put_user(host_ver->desc_len, &target_ver->desc_len);
5692      unlock_drm_version(host_ver, target_ver, true);
5693  }
5694  
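      /*
       * Only DRM_IOCTL_VERSION is handled here: the guest-supplied
       * name/date/desc buffers are locked up front, the ioctl runs on a
       * host struct drm_version, and the version numbers, lengths and
       * strings are copied back on success.
       */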
5695  static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5696                               int fd, int cmd, abi_long arg)
5697  {
5698      struct drm_version *ver;
5699      struct target_drm_version *target_ver;
5700      abi_long ret;
5701  
5702      switch (ie->host_cmd) {
5703      case DRM_IOCTL_VERSION:
5704          if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5705              return -TARGET_EFAULT;
5706          }
5707          ver = (struct drm_version *)buf_temp;
5708          ret = target_to_host_drmversion(ver, target_ver);
5709          if (!is_error(ret)) {
5710              ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5711              if (is_error(ret)) {
5712                  unlock_drm_version(ver, target_ver, false);
5713              } else {
5714                  host_to_target_drmversion(target_ver, ver);
5715              }
5716          }
5717          unlock_user_struct(target_ver, arg, 0);
5718          return ret;
5719      }
5720      return -TARGET_ENOSYS;
5721  }
5722  
5723  static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5724                                             struct drm_i915_getparam *gparam,
5725                                             int fd, abi_long arg)
5726  {
5727      abi_long ret;
5728      int value;
5729      struct target_drm_i915_getparam *target_gparam;
5730  
5731      if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5732          return -TARGET_EFAULT;
5733      }
5734  
5735      __get_user(gparam->param, &target_gparam->param);
5736      gparam->value = &value;
5737      ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5738      put_user_s32(value, target_gparam->value);
5739  
5740      unlock_user_struct(target_gparam, arg, 0);
5741      return ret;
5742  }
5743  
5744  static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5745                                    int fd, int cmd, abi_long arg)
5746  {
5747      switch (ie->host_cmd) {
5748      case DRM_IOCTL_I915_GETPARAM:
5749          return do_ioctl_drm_i915_getparam(ie,
5750                                            (struct drm_i915_getparam *)buf_temp,
5751                                            fd, arg);
5752      default:
5753          return -TARGET_ENOSYS;
5754      }
5755  }
5756  
5757  #endif
5758  
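      /*
       * TUNSETTXFILTER: struct tun_filter is followed by filter->count
       * ETH_ALEN-byte addresses, so copy the header and then the address
       * array, refusing requests that would exceed MAX_STRUCT_SIZE.
       */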
5759  static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5760                                          int fd, int cmd, abi_long arg)
5761  {
5762      struct tun_filter *filter = (struct tun_filter *)buf_temp;
5763      struct tun_filter *target_filter;
5764      char *target_addr;
5765  
5766      assert(ie->access == IOC_W);
5767  
5768      target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5769      if (!target_filter) {
5770          return -TARGET_EFAULT;
5771      }
5772      filter->flags = tswap16(target_filter->flags);
5773      filter->count = tswap16(target_filter->count);
5774      unlock_user(target_filter, arg, 0);
5775  
5776      if (filter->count) {
5777          if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5778              MAX_STRUCT_SIZE) {
5779              return -TARGET_EFAULT;
5780          }
5781  
5782          target_addr = lock_user(VERIFY_READ,
5783                                  arg + offsetof(struct tun_filter, addr),
5784                                  filter->count * ETH_ALEN, 1);
5785          if (!target_addr) {
5786              return -TARGET_EFAULT;
5787          }
5788          memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5789          unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5790      }
5791  
5792      return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5793  }
5794  
5795  IOCTLEntry ioctl_entries[] = {
5796  #define IOCTL(cmd, access, ...) \
5797      { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5798  #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5799      { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5800  #define IOCTL_IGNORE(cmd) \
5801      { TARGET_ ## cmd, 0, #cmd },
5802  #include "ioctls.h"
5803      { 0, 0, },
5804  };
5805  
5806  /* ??? Implement proper locking for ioctls.  */
5807  /* do_ioctl() must return target values and target errnos. */
5808  static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5809  {
5810      const IOCTLEntry *ie;
5811      const argtype *arg_type;
5812      abi_long ret;
5813      uint8_t buf_temp[MAX_STRUCT_SIZE];
5814      int target_size;
5815      void *argptr;
5816  
5817      ie = ioctl_entries;
5818      for (;;) {
5819          if (ie->target_cmd == 0) {
5820              qemu_log_mask(
5821                  LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5822              return -TARGET_ENOSYS;
5823          }
5824          if (ie->target_cmd == cmd)
5825              break;
5826          ie++;
5827      }
5828      arg_type = ie->arg_type;
5829      if (ie->do_ioctl) {
5830          return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5831      } else if (!ie->host_cmd) {
5832          /* Some architectures define BSD ioctls in their headers
5833             that are not implemented in Linux.  */
5834          return -TARGET_ENOSYS;
5835      }
5836  
5837      switch (arg_type[0]) {
5838      case TYPE_NULL:
5839          /* no argument */
5840          ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5841          break;
5842      case TYPE_PTRVOID:
5843      case TYPE_INT:
5844      case TYPE_LONG:
5845      case TYPE_ULONG:
5846          ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5847          break;
5848      case TYPE_PTR:
5849          arg_type++;
5850          target_size = thunk_type_size(arg_type, 0);
5851          switch (ie->access) {
5852          case IOC_R:
5853              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5854              if (!is_error(ret)) {
5855                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5856                  if (!argptr)
5857                      return -TARGET_EFAULT;
5858                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5859                  unlock_user(argptr, arg, target_size);
5860              }
5861              break;
5862          case IOC_W:
5863              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5864              if (!argptr)
5865                  return -TARGET_EFAULT;
5866              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5867              unlock_user(argptr, arg, 0);
5868              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5869              break;
5870          default:
5871          case IOC_RW:
5872              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5873              if (!argptr)
5874                  return -TARGET_EFAULT;
5875              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5876              unlock_user(argptr, arg, 0);
5877              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5878              if (!is_error(ret)) {
5879                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5880                  if (!argptr)
5881                      return -TARGET_EFAULT;
5882                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5883                  unlock_user(argptr, arg, target_size);
5884              }
5885              break;
5886          }
5887          break;
5888      default:
5889          qemu_log_mask(LOG_UNIMP,
5890                        "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5891                        (long)cmd, arg_type[0]);
5892          ret = -TARGET_ENOSYS;
5893          break;
5894      }
5895      return ret;
5896  }
5897  
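      /*
       * Translation tables for the termios flag words.  Each entry gives a
       * target mask/bits pair and the matching host mask/bits pair, and is
       * consumed by target_to_host_bitmask()/host_to_target_bitmask() in
       * the termios converters below.
       */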
5898  static const bitmask_transtbl iflag_tbl[] = {
5899          { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5900          { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5901          { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5902          { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5903          { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5904          { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5905          { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5906          { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5907          { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5908          { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5909          { TARGET_IXON, TARGET_IXON, IXON, IXON },
5910          { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5911          { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5912          { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5913          { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5914          { 0, 0, 0, 0 }
5915  };
5916  
5917  static const bitmask_transtbl oflag_tbl[] = {
5918  	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5919  	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5920  	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5921  	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5922  	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5923  	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5924  	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5925  	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5926  	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5927  	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5928  	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5929  	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5930  	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5931  	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5932  	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5933  	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5934  	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5935  	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5936  	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5937  	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5938  	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5939  	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5940  	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5941  	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5942  	{ 0, 0, 0, 0 }
5943  };
5944  
5945  static const bitmask_transtbl cflag_tbl[] = {
5946      { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5947      { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5948      { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5949      { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5950      { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5951      { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5952      { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5953      { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5954      { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5955      { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5956      { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5957      { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5958      { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5959      { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5960      { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5961      { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5962      { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5963      { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5964      { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5965      { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5966      { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5967      { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5968      { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5969      { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5970      { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5971      { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5972      { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5973      { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5974      { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5975      { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5976      { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5977      { 0, 0, 0, 0 }
5978  };
5979  
5980  static const bitmask_transtbl lflag_tbl[] = {
5981      { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5982      { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5983      { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5984      { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5985      { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5986      { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5987      { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5988      { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5989      { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5990      { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5991      { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5992      { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5993      { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5994      { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5995      { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5996      { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
5997      { 0, 0, 0, 0 }
5998  };
5999  
6000  static void target_to_host_termios (void *dst, const void *src)
6001  {
6002      struct host_termios *host = dst;
6003      const struct target_termios *target = src;
6004  
6005      host->c_iflag =
6006          target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
6007      host->c_oflag =
6008          target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
6009      host->c_cflag =
6010          target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
6011      host->c_lflag =
6012          target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6013      host->c_line = target->c_line;
6014  
6015      memset(host->c_cc, 0, sizeof(host->c_cc));
6016      host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6017      host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6018      host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6019      host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6020      host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6021      host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6022      host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6023      host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6024      host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6025      host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6026      host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6027      host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6028      host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6029      host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6030      host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6031      host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6032      host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6033  }
6034  
6035  static void host_to_target_termios (void *dst, const void *src)
6036  {
6037      struct target_termios *target = dst;
6038      const struct host_termios *host = src;
6039  
6040      target->c_iflag =
6041          tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6042      target->c_oflag =
6043          tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6044      target->c_cflag =
6045          tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6046      target->c_lflag =
6047          tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6048      target->c_line = host->c_line;
6049  
6050      memset(target->c_cc, 0, sizeof(target->c_cc));
6051      target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6052      target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6053      target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6054      target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6055      target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6056      target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6057      target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6058      target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6059      target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6060      target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6061      target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6062      target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6063      target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6064      target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6065      target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6066      target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6067      target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6068  }
6069  
6070  static const StructEntry struct_termios_def = {
6071      .convert = { host_to_target_termios, target_to_host_termios },
6072      .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6073      .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6074      .print = print_termios,
6075  };
6076  
6077  static const bitmask_transtbl mmap_flags_tbl[] = {
6078      { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6079      { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6080      { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6081      { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6082        MAP_ANONYMOUS, MAP_ANONYMOUS },
6083      { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6084        MAP_GROWSDOWN, MAP_GROWSDOWN },
6085      { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6086        MAP_DENYWRITE, MAP_DENYWRITE },
6087      { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6088        MAP_EXECUTABLE, MAP_EXECUTABLE },
6089      { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6090      { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6091        MAP_NORESERVE, MAP_NORESERVE },
6092      { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6093      /* MAP_STACK had been ignored by the kernel for quite some time.
6094         Recognize it for the target insofar as we do not want to pass
6095         it through to the host.  */
6096      { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6097      { 0, 0, 0, 0 }
6098  };
6099  
6100  /*
6101   * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6102   *       TARGET_I386 is defined if TARGET_X86_64 is defined
6103   */
6104  #if defined(TARGET_I386)
6105  
6106  /* NOTE: there is really one LDT for all the threads */
6107  static uint8_t *ldt_table;
6108  
6109  static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6110  {
6111      int size;
6112      void *p;
6113  
6114      if (!ldt_table)
6115          return 0;
6116      size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6117      if (size > bytecount)
6118          size = bytecount;
6119      p = lock_user(VERIFY_WRITE, ptr, size, 0);
6120      if (!p)
6121          return -TARGET_EFAULT;
6122      /* ??? Should this be byteswapped?  */
6123      memcpy(p, ldt_table, size);
6124      unlock_user(p, ptr, size);
6125      return size;
6126  }
6127  
6128  /* XXX: add locking support */
6129  static abi_long write_ldt(CPUX86State *env,
6130                            abi_ulong ptr, unsigned long bytecount, int oldmode)
6131  {
6132      struct target_modify_ldt_ldt_s ldt_info;
6133      struct target_modify_ldt_ldt_s *target_ldt_info;
6134      int seg_32bit, contents, read_exec_only, limit_in_pages;
6135      int seg_not_present, useable, lm;
6136      uint32_t *lp, entry_1, entry_2;
6137  
6138      if (bytecount != sizeof(ldt_info))
6139          return -TARGET_EINVAL;
6140      if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6141          return -TARGET_EFAULT;
6142      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6143      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6144      ldt_info.limit = tswap32(target_ldt_info->limit);
6145      ldt_info.flags = tswap32(target_ldt_info->flags);
6146      unlock_user_struct(target_ldt_info, ptr, 0);
6147  
6148      if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6149          return -TARGET_EINVAL;
6150      seg_32bit = ldt_info.flags & 1;
6151      contents = (ldt_info.flags >> 1) & 3;
6152      read_exec_only = (ldt_info.flags >> 3) & 1;
6153      limit_in_pages = (ldt_info.flags >> 4) & 1;
6154      seg_not_present = (ldt_info.flags >> 5) & 1;
6155      useable = (ldt_info.flags >> 6) & 1;
6156  #ifdef TARGET_ABI32
6157      lm = 0;
6158  #else
6159      lm = (ldt_info.flags >> 7) & 1;
6160  #endif
6161      if (contents == 3) {
6162          if (oldmode)
6163              return -TARGET_EINVAL;
6164          if (seg_not_present == 0)
6165              return -TARGET_EINVAL;
6166      }
6167      /* allocate the LDT */
6168      if (!ldt_table) {
6169          env->ldt.base = target_mmap(0,
6170                                      TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6171                                      PROT_READ|PROT_WRITE,
6172                                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6173          if (env->ldt.base == -1)
6174              return -TARGET_ENOMEM;
6175          memset(g2h_untagged(env->ldt.base), 0,
6176                 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6177          env->ldt.limit = 0xffff;
6178          ldt_table = g2h_untagged(env->ldt.base);
6179      }
6180  
6181      /* NOTE: same code as Linux kernel */
6182      /* Allow LDTs to be cleared by the user. */
6183      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6184          if (oldmode ||
6185              (contents == 0		&&
6186               read_exec_only == 1	&&
6187               seg_32bit == 0		&&
6188               limit_in_pages == 0	&&
6189               seg_not_present == 1	&&
6190               useable == 0 )) {
6191              entry_1 = 0;
6192              entry_2 = 0;
6193              goto install;
6194          }
6195      }
6196  
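          /* Build the two 32-bit words of an x86 segment descriptor, as the
           * kernel's write_ldt() does: base and limit are split across both
           * words, with the access and flag bits packed into the high word. */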
6197      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6198          (ldt_info.limit & 0x0ffff);
6199      entry_2 = (ldt_info.base_addr & 0xff000000) |
6200          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6201          (ldt_info.limit & 0xf0000) |
6202          ((read_exec_only ^ 1) << 9) |
6203          (contents << 10) |
6204          ((seg_not_present ^ 1) << 15) |
6205          (seg_32bit << 22) |
6206          (limit_in_pages << 23) |
6207          (lm << 21) |
6208          0x7000;
6209      if (!oldmode)
6210          entry_2 |= (useable << 20);
6211  
6212      /* Install the new entry ...  */
6213  install:
6214      lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6215      lp[0] = tswap32(entry_1);
6216      lp[1] = tswap32(entry_2);
6217      return 0;
6218  }
6219  
6220  /* specific and weird i386 syscalls */
6221  static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6222                                unsigned long bytecount)
6223  {
6224      abi_long ret;
6225  
6226      switch (func) {
6227      case 0:
6228          ret = read_ldt(ptr, bytecount);
6229          break;
6230      case 1:
6231          ret = write_ldt(env, ptr, bytecount, 1);
6232          break;
6233      case 0x11:
6234          ret = write_ldt(env, ptr, bytecount, 0);
6235          break;
6236      default:
6237          ret = -TARGET_ENOSYS;
6238          break;
6239      }
6240      return ret;
6241  }
6242  
6243  #if defined(TARGET_ABI32)
6244  abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6245  {
6246      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6247      struct target_modify_ldt_ldt_s ldt_info;
6248      struct target_modify_ldt_ldt_s *target_ldt_info;
6249      int seg_32bit, contents, read_exec_only, limit_in_pages;
6250      int seg_not_present, useable, lm;
6251      uint32_t *lp, entry_1, entry_2;
6252      int i;
6253  
6254      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6255      if (!target_ldt_info)
6256          return -TARGET_EFAULT;
6257      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6258      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6259      ldt_info.limit = tswap32(target_ldt_info->limit);
6260      ldt_info.flags = tswap32(target_ldt_info->flags);
6261      if (ldt_info.entry_number == -1) {
6262          for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6263              if (gdt_table[i] == 0) {
6264                  ldt_info.entry_number = i;
6265                  target_ldt_info->entry_number = tswap32(i);
6266                  break;
6267              }
6268          }
6269      }
6270      unlock_user_struct(target_ldt_info, ptr, 1);
6271  
6272      if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6273          ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6274             return -TARGET_EINVAL;
6275      seg_32bit = ldt_info.flags & 1;
6276      contents = (ldt_info.flags >> 1) & 3;
6277      read_exec_only = (ldt_info.flags >> 3) & 1;
6278      limit_in_pages = (ldt_info.flags >> 4) & 1;
6279      seg_not_present = (ldt_info.flags >> 5) & 1;
6280      useable = (ldt_info.flags >> 6) & 1;
6281  #ifdef TARGET_ABI32
6282      lm = 0;
6283  #else
6284      lm = (ldt_info.flags >> 7) & 1;
6285  #endif
6286  
6287      if (contents == 3) {
6288          if (seg_not_present == 0)
6289              return -TARGET_EINVAL;
6290      }
6291  
6292      /* NOTE: same code as Linux kernel */
6293      /* Allow LDTs to be cleared by the user. */
6294      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6295          if ((contents == 0             &&
6296               read_exec_only == 1       &&
6297               seg_32bit == 0            &&
6298               limit_in_pages == 0       &&
6299               seg_not_present == 1      &&
6300               useable == 0 )) {
6301              entry_1 = 0;
6302              entry_2 = 0;
6303              goto install;
6304          }
6305      }
6306  
6307      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6308          (ldt_info.limit & 0x0ffff);
6309      entry_2 = (ldt_info.base_addr & 0xff000000) |
6310          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6311          (ldt_info.limit & 0xf0000) |
6312          ((read_exec_only ^ 1) << 9) |
6313          (contents << 10) |
6314          ((seg_not_present ^ 1) << 15) |
6315          (seg_32bit << 22) |
6316          (limit_in_pages << 23) |
6317          (useable << 20) |
6318          (lm << 21) |
6319          0x7000;
6320  
6321      /* Install the new entry ...  */
6322  install:
6323      lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6324      lp[0] = tswap32(entry_1);
6325      lp[1] = tswap32(entry_2);
6326      return 0;
6327  }
6328  
6329  static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6330  {
6331      struct target_modify_ldt_ldt_s *target_ldt_info;
6332      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6333      uint32_t base_addr, limit, flags;
6334      int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6335      int seg_not_present, useable, lm;
6336      uint32_t *lp, entry_1, entry_2;
6337  
6338      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6339      if (!target_ldt_info)
6340          return -TARGET_EFAULT;
6341      idx = tswap32(target_ldt_info->entry_number);
6342      if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6343          idx > TARGET_GDT_ENTRY_TLS_MAX) {
6344          unlock_user_struct(target_ldt_info, ptr, 1);
6345          return -TARGET_EINVAL;
6346      }
6347      lp = (uint32_t *)(gdt_table + idx);
6348      entry_1 = tswap32(lp[0]);
6349      entry_2 = tswap32(lp[1]);
6350  
6351      read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6352      contents = (entry_2 >> 10) & 3;
6353      seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6354      seg_32bit = (entry_2 >> 22) & 1;
6355      limit_in_pages = (entry_2 >> 23) & 1;
6356      useable = (entry_2 >> 20) & 1;
6357  #ifdef TARGET_ABI32
6358      lm = 0;
6359  #else
6360      lm = (entry_2 >> 21) & 1;
6361  #endif
6362      flags = (seg_32bit << 0) | (contents << 1) |
6363          (read_exec_only << 3) | (limit_in_pages << 4) |
6364          (seg_not_present << 5) | (useable << 6) | (lm << 7);
6365      limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6366      base_addr = (entry_1 >> 16) |
6367          (entry_2 & 0xff000000) |
6368          ((entry_2 & 0xff) << 16);
6369      target_ldt_info->base_addr = tswapal(base_addr);
6370      target_ldt_info->limit = tswap32(limit);
6371      target_ldt_info->flags = tswap32(flags);
6372      unlock_user_struct(target_ldt_info, ptr, 1);
6373      return 0;
6374  }
6375  
6376  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6377  {
6378      return -TARGET_ENOSYS;
6379  }
6380  #else
6381  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6382  {
6383      abi_long ret = 0;
6384      abi_ulong val;
6385      int idx;
6386  
6387      switch(code) {
6388      case TARGET_ARCH_SET_GS:
6389      case TARGET_ARCH_SET_FS:
6390          if (code == TARGET_ARCH_SET_GS)
6391              idx = R_GS;
6392          else
6393              idx = R_FS;
6394          cpu_x86_load_seg(env, idx, 0);
6395          env->segs[idx].base = addr;
6396          break;
6397      case TARGET_ARCH_GET_GS:
6398      case TARGET_ARCH_GET_FS:
6399          if (code == TARGET_ARCH_GET_GS)
6400              idx = R_GS;
6401          else
6402              idx = R_FS;
6403          val = env->segs[idx].base;
6404          if (put_user(val, addr, abi_ulong))
6405              ret = -TARGET_EFAULT;
6406          break;
6407      default:
6408          ret = -TARGET_EINVAL;
6409          break;
6410      }
6411      return ret;
6412  }
6413  #endif /* defined(TARGET_ABI32) */
6414  
6415  #endif /* defined(TARGET_I386) */
6416  
6417  #define NEW_STACK_SIZE 0x40000
6418  
6419  
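      /*
       * clone_lock serialises thread creation in do_fork(): the parent holds
       * it while finishing the child's CPU/TLS setup, and clone_func() takes
       * and releases it before entering cpu_loop(), so the child cannot run
       * guest code until that setup is complete.
       */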
6420  static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6421  typedef struct {
6422      CPUArchState *env;
6423      pthread_mutex_t mutex;
6424      pthread_cond_t cond;
6425      pthread_t thread;
6426      uint32_t tid;
6427      abi_ulong child_tidptr;
6428      abi_ulong parent_tidptr;
6429      sigset_t sigmask;
6430  } new_thread_info;
6431  
6432  static void *clone_func(void *arg)
6433  {
6434      new_thread_info *info = arg;
6435      CPUArchState *env;
6436      CPUState *cpu;
6437      TaskState *ts;
6438  
6439      rcu_register_thread();
6440      tcg_register_thread();
6441      env = info->env;
6442      cpu = env_cpu(env);
6443      thread_cpu = cpu;
6444      ts = (TaskState *)cpu->opaque;
6445      info->tid = sys_gettid();
6446      task_settid(ts);
6447      if (info->child_tidptr)
6448          put_user_u32(info->tid, info->child_tidptr);
6449      if (info->parent_tidptr)
6450          put_user_u32(info->tid, info->parent_tidptr);
6451      qemu_guest_random_seed_thread_part2(cpu->random_seed);
6452      /* Enable signals.  */
6453      sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6454      /* Signal to the parent that we're ready.  */
6455      pthread_mutex_lock(&info->mutex);
6456      pthread_cond_broadcast(&info->cond);
6457      pthread_mutex_unlock(&info->mutex);
6458      /* Wait until the parent has finished initializing the tls state.  */
6459      pthread_mutex_lock(&clone_lock);
6460      pthread_mutex_unlock(&clone_lock);
6461      cpu_loop(env);
6462      /* never exits */
6463      return NULL;
6464  }
6465  
6466  /* do_fork() must return host values and target errnos (unlike most
6467     do_*() functions). */
6468  static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6469                     abi_ulong parent_tidptr, target_ulong newtls,
6470                     abi_ulong child_tidptr)
6471  {
6472      CPUState *cpu = env_cpu(env);
6473      int ret;
6474      TaskState *ts;
6475      CPUState *new_cpu;
6476      CPUArchState *new_env;
6477      sigset_t sigmask;
6478  
6479      flags &= ~CLONE_IGNORED_FLAGS;
6480  
6481      /* Emulate vfork() with fork() */
6482      if (flags & CLONE_VFORK)
6483          flags &= ~(CLONE_VFORK | CLONE_VM);
6484  
6485      if (flags & CLONE_VM) {
6486          TaskState *parent_ts = (TaskState *)cpu->opaque;
6487          new_thread_info info;
6488          pthread_attr_t attr;
6489  
6490          if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6491              (flags & CLONE_INVALID_THREAD_FLAGS)) {
6492              return -TARGET_EINVAL;
6493          }
6494  
6495          ts = g_new0(TaskState, 1);
6496          init_task_state(ts);
6497  
6498          /* Grab a mutex so that thread setup appears atomic.  */
6499          pthread_mutex_lock(&clone_lock);
6500  
6501          /*
6502           * If this is our first additional thread, we need to ensure we
6503           * generate code for parallel execution and flush old translations.
6504           * Do this now so that the copy gets CF_PARALLEL too.
6505           */
6506          if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6507              cpu->tcg_cflags |= CF_PARALLEL;
6508              tb_flush(cpu);
6509          }
6510  
6511          /* we create a new CPU instance. */
6512          new_env = cpu_copy(env);
6513          /* Init regs that differ from the parent.  */
6514          cpu_clone_regs_child(new_env, newsp, flags);
6515          cpu_clone_regs_parent(env, flags);
6516          new_cpu = env_cpu(new_env);
6517          new_cpu->opaque = ts;
6518          ts->bprm = parent_ts->bprm;
6519          ts->info = parent_ts->info;
6520          ts->signal_mask = parent_ts->signal_mask;
6521  
6522          if (flags & CLONE_CHILD_CLEARTID) {
6523              ts->child_tidptr = child_tidptr;
6524          }
6525  
6526          if (flags & CLONE_SETTLS) {
6527              cpu_set_tls (new_env, newtls);
6528          }
6529  
6530          memset(&info, 0, sizeof(info));
6531          pthread_mutex_init(&info.mutex, NULL);
6532          pthread_mutex_lock(&info.mutex);
6533          pthread_cond_init(&info.cond, NULL);
6534          info.env = new_env;
6535          if (flags & CLONE_CHILD_SETTID) {
6536              info.child_tidptr = child_tidptr;
6537          }
6538          if (flags & CLONE_PARENT_SETTID) {
6539              info.parent_tidptr = parent_tidptr;
6540          }
6541  
6542          ret = pthread_attr_init(&attr);
6543          ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6544          ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6545          /* It is not safe to deliver signals until the child has finished
6546             initializing, so temporarily block all signals.  */
6547          sigfillset(&sigmask);
6548          sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6549          cpu->random_seed = qemu_guest_random_seed_thread_part1();
6550  
6551          ret = pthread_create(&info.thread, &attr, clone_func, &info);
6552          /* TODO: Free new CPU state if thread creation failed.  */
6553  
6554          sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6555          pthread_attr_destroy(&attr);
6556          if (ret == 0) {
6557              /* Wait for the child to initialize.  */
6558              pthread_cond_wait(&info.cond, &info.mutex);
6559              ret = info.tid;
6560          } else {
6561              ret = -1;
6562          }
6563          pthread_mutex_unlock(&info.mutex);
6564          pthread_cond_destroy(&info.cond);
6565          pthread_mutex_destroy(&info.mutex);
6566          pthread_mutex_unlock(&clone_lock);
6567      } else {
6568          /* if no CLONE_VM, we consider it a fork */
6569          if (flags & CLONE_INVALID_FORK_FLAGS) {
6570              return -TARGET_EINVAL;
6571          }
6572  
6573          /* We can't support custom termination signals */
6574          if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6575              return -TARGET_EINVAL;
6576          }
6577  
6578          if (block_signals()) {
6579              return -TARGET_ERESTARTSYS;
6580          }
6581  
6582          fork_start();
6583          ret = fork();
6584          if (ret == 0) {
6585              /* Child Process.  */
6586              cpu_clone_regs_child(env, newsp, flags);
6587              fork_end(1);
6588              /* There is a race condition here.  The parent process could
6589                 theoretically read the TID in the child process before the child
6590                 tid is set.  This would require using either ptrace
6591                 (not implemented) or having *_tidptr to point at a shared memory
6592                 mapping.  We can't repeat the spinlock hack used above because
6593                 the child process gets its own copy of the lock.  */
6594              if (flags & CLONE_CHILD_SETTID)
6595                  put_user_u32(sys_gettid(), child_tidptr);
6596              if (flags & CLONE_PARENT_SETTID)
6597                  put_user_u32(sys_gettid(), parent_tidptr);
6598              ts = (TaskState *)cpu->opaque;
6599              if (flags & CLONE_SETTLS)
6600                  cpu_set_tls (env, newtls);
6601              if (flags & CLONE_CHILD_CLEARTID)
6602                  ts->child_tidptr = child_tidptr;
6603          } else {
6604              cpu_clone_regs_parent(env, flags);
6605              fork_end(0);
6606          }
6607      }
6608      return ret;
6609  }
6610  
6611  /* warning: doesn't handle Linux-specific flags... */
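      /* The plain (32-bit) lock commands are mapped to their *LK64 host
       * counterparts because do_fcntl() below always converts the target
       * structure through a host struct flock64. */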
6612  static int target_to_host_fcntl_cmd(int cmd)
6613  {
6614      int ret;
6615  
6616      switch(cmd) {
6617      case TARGET_F_DUPFD:
6618      case TARGET_F_GETFD:
6619      case TARGET_F_SETFD:
6620      case TARGET_F_GETFL:
6621      case TARGET_F_SETFL:
6622      case TARGET_F_OFD_GETLK:
6623      case TARGET_F_OFD_SETLK:
6624      case TARGET_F_OFD_SETLKW:
6625          ret = cmd;
6626          break;
6627      case TARGET_F_GETLK:
6628          ret = F_GETLK64;
6629          break;
6630      case TARGET_F_SETLK:
6631          ret = F_SETLK64;
6632          break;
6633      case TARGET_F_SETLKW:
6634          ret = F_SETLKW64;
6635          break;
6636      case TARGET_F_GETOWN:
6637          ret = F_GETOWN;
6638          break;
6639      case TARGET_F_SETOWN:
6640          ret = F_SETOWN;
6641          break;
6642      case TARGET_F_GETSIG:
6643          ret = F_GETSIG;
6644          break;
6645      case TARGET_F_SETSIG:
6646          ret = F_SETSIG;
6647          break;
6648  #if TARGET_ABI_BITS == 32
6649      case TARGET_F_GETLK64:
6650          ret = F_GETLK64;
6651          break;
6652      case TARGET_F_SETLK64:
6653          ret = F_SETLK64;
6654          break;
6655      case TARGET_F_SETLKW64:
6656          ret = F_SETLKW64;
6657          break;
6658  #endif
6659      case TARGET_F_SETLEASE:
6660          ret = F_SETLEASE;
6661          break;
6662      case TARGET_F_GETLEASE:
6663          ret = F_GETLEASE;
6664          break;
6665  #ifdef F_DUPFD_CLOEXEC
6666      case TARGET_F_DUPFD_CLOEXEC:
6667          ret = F_DUPFD_CLOEXEC;
6668          break;
6669  #endif
6670      case TARGET_F_NOTIFY:
6671          ret = F_NOTIFY;
6672          break;
6673  #ifdef F_GETOWN_EX
6674      case TARGET_F_GETOWN_EX:
6675          ret = F_GETOWN_EX;
6676          break;
6677  #endif
6678  #ifdef F_SETOWN_EX
6679      case TARGET_F_SETOWN_EX:
6680          ret = F_SETOWN_EX;
6681          break;
6682  #endif
6683  #ifdef F_SETPIPE_SZ
6684      case TARGET_F_SETPIPE_SZ:
6685          ret = F_SETPIPE_SZ;
6686          break;
6687      case TARGET_F_GETPIPE_SZ:
6688          ret = F_GETPIPE_SZ;
6689          break;
6690  #endif
6691  #ifdef F_ADD_SEALS
6692      case TARGET_F_ADD_SEALS:
6693          ret = F_ADD_SEALS;
6694          break;
6695      case TARGET_F_GET_SEALS:
6696          ret = F_GET_SEALS;
6697          break;
6698  #endif
6699      default:
6700          ret = -TARGET_EINVAL;
6701          break;
6702      }
6703  
6704  #if defined(__powerpc64__)
6705      /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values that
6706       * are not supported by the kernel. The glibc fcntl call actually adjusts
6707       * them to 5, 6 and 7 before making the syscall(). Since we make the
6708       * syscall directly, adjust to what is supported by the kernel.
6709       */
6710      if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6711          ret -= F_GETLK64 - 5;
6712      }
6713  #endif
6714  
6715      return ret;
6716  }
6717  
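      /*
       * FLOCK_TRANSTBL is expanded twice below with different definitions of
       * TRANSTBL_CONVERT, generating the case labels for both directions of
       * the l_type conversion from a single list.
       */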
6718  #define FLOCK_TRANSTBL \
6719      switch (type) { \
6720      TRANSTBL_CONVERT(F_RDLCK); \
6721      TRANSTBL_CONVERT(F_WRLCK); \
6722      TRANSTBL_CONVERT(F_UNLCK); \
6723      }
6724  
6725  static int target_to_host_flock(int type)
6726  {
6727  #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6728      FLOCK_TRANSTBL
6729  #undef  TRANSTBL_CONVERT
6730      return -TARGET_EINVAL;
6731  }
6732  
6733  static int host_to_target_flock(int type)
6734  {
6735  #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6736      FLOCK_TRANSTBL
6737  #undef  TRANSTBL_CONVERT
6738      /* If we don't know how to convert the value coming
6739       * from the host, we copy it to the target field as-is.
6740       */
6741      return type;
6742  }
6743  
6744  static inline abi_long copy_from_user_flock(struct flock64 *fl,
6745                                              abi_ulong target_flock_addr)
6746  {
6747      struct target_flock *target_fl;
6748      int l_type;
6749  
6750      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6751          return -TARGET_EFAULT;
6752      }
6753  
6754      __get_user(l_type, &target_fl->l_type);
6755      l_type = target_to_host_flock(l_type);
6756      if (l_type < 0) {
6757          return l_type;
6758      }
6759      fl->l_type = l_type;
6760      __get_user(fl->l_whence, &target_fl->l_whence);
6761      __get_user(fl->l_start, &target_fl->l_start);
6762      __get_user(fl->l_len, &target_fl->l_len);
6763      __get_user(fl->l_pid, &target_fl->l_pid);
6764      unlock_user_struct(target_fl, target_flock_addr, 0);
6765      return 0;
6766  }
6767  
6768  static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6769                                            const struct flock64 *fl)
6770  {
6771      struct target_flock *target_fl;
6772      short l_type;
6773  
6774      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6775          return -TARGET_EFAULT;
6776      }
6777  
6778      l_type = host_to_target_flock(fl->l_type);
6779      __put_user(l_type, &target_fl->l_type);
6780      __put_user(fl->l_whence, &target_fl->l_whence);
6781      __put_user(fl->l_start, &target_fl->l_start);
6782      __put_user(fl->l_len, &target_fl->l_len);
6783      __put_user(fl->l_pid, &target_fl->l_pid);
6784      unlock_user_struct(target_fl, target_flock_addr, 1);
6785      return 0;
6786  }
6787  
6788  typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6789  typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6790  
6791  #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6792  static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6793                                                     abi_ulong target_flock_addr)
6794  {
6795      struct target_oabi_flock64 *target_fl;
6796      int l_type;
6797  
6798      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6799          return -TARGET_EFAULT;
6800      }
6801  
6802      __get_user(l_type, &target_fl->l_type);
6803      l_type = target_to_host_flock(l_type);
6804      if (l_type < 0) {
6805          return l_type;
6806      }
6807      fl->l_type = l_type;
6808      __get_user(fl->l_whence, &target_fl->l_whence);
6809      __get_user(fl->l_start, &target_fl->l_start);
6810      __get_user(fl->l_len, &target_fl->l_len);
6811      __get_user(fl->l_pid, &target_fl->l_pid);
6812      unlock_user_struct(target_fl, target_flock_addr, 0);
6813      return 0;
6814  }
6815  
6816  static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6817                                                   const struct flock64 *fl)
6818  {
6819      struct target_oabi_flock64 *target_fl;
6820      short l_type;
6821  
6822      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6823          return -TARGET_EFAULT;
6824      }
6825  
6826      l_type = host_to_target_flock(fl->l_type);
6827      __put_user(l_type, &target_fl->l_type);
6828      __put_user(fl->l_whence, &target_fl->l_whence);
6829      __put_user(fl->l_start, &target_fl->l_start);
6830      __put_user(fl->l_len, &target_fl->l_len);
6831      __put_user(fl->l_pid, &target_fl->l_pid);
6832      unlock_user_struct(target_fl, target_flock_addr, 1);
6833      return 0;
6834  }
6835  #endif
6836  
6837  static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6838                                                abi_ulong target_flock_addr)
6839  {
6840      struct target_flock64 *target_fl;
6841      int l_type;
6842  
6843      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6844          return -TARGET_EFAULT;
6845      }
6846  
6847      __get_user(l_type, &target_fl->l_type);
6848      l_type = target_to_host_flock(l_type);
6849      if (l_type < 0) {
6850          return l_type;
6851      }
6852      fl->l_type = l_type;
6853      __get_user(fl->l_whence, &target_fl->l_whence);
6854      __get_user(fl->l_start, &target_fl->l_start);
6855      __get_user(fl->l_len, &target_fl->l_len);
6856      __get_user(fl->l_pid, &target_fl->l_pid);
6857      unlock_user_struct(target_fl, target_flock_addr, 0);
6858      return 0;
6859  }
6860  
6861  static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6862                                              const struct flock64 *fl)
6863  {
6864      struct target_flock64 *target_fl;
6865      short l_type;
6866  
6867      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6868          return -TARGET_EFAULT;
6869      }
6870  
6871      l_type = host_to_target_flock(fl->l_type);
6872      __put_user(l_type, &target_fl->l_type);
6873      __put_user(fl->l_whence, &target_fl->l_whence);
6874      __put_user(fl->l_start, &target_fl->l_start);
6875      __put_user(fl->l_len, &target_fl->l_len);
6876      __put_user(fl->l_pid, &target_fl->l_pid);
6877      unlock_user_struct(target_fl, target_flock_addr, 1);
6878      return 0;
6879  }
6880  
6881  static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6882  {
6883      struct flock64 fl64;
6884  #ifdef F_GETOWN_EX
6885      struct f_owner_ex fox;
6886      struct target_f_owner_ex *target_fox;
6887  #endif
6888      abi_long ret;
6889      int host_cmd = target_to_host_fcntl_cmd(cmd);
6890  
6891      if (host_cmd == -TARGET_EINVAL)
6892          return host_cmd;
6893  
6894      switch(cmd) {
6895      case TARGET_F_GETLK:
6896          ret = copy_from_user_flock(&fl64, arg);
6897          if (ret) {
6898              return ret;
6899          }
6900          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6901          if (ret == 0) {
6902              ret = copy_to_user_flock(arg, &fl64);
6903          }
6904          break;
6905  
6906      case TARGET_F_SETLK:
6907      case TARGET_F_SETLKW:
6908          ret = copy_from_user_flock(&fl64, arg);
6909          if (ret) {
6910              return ret;
6911          }
6912          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6913          break;
6914  
6915      case TARGET_F_GETLK64:
6916      case TARGET_F_OFD_GETLK:
6917          ret = copy_from_user_flock64(&fl64, arg);
6918          if (ret) {
6919              return ret;
6920          }
6921          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6922          if (ret == 0) {
6923              ret = copy_to_user_flock64(arg, &fl64);
6924          }
6925          break;
6926      case TARGET_F_SETLK64:
6927      case TARGET_F_SETLKW64:
6928      case TARGET_F_OFD_SETLK:
6929      case TARGET_F_OFD_SETLKW:
6930          ret = copy_from_user_flock64(&fl64, arg);
6931          if (ret) {
6932              return ret;
6933          }
6934          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6935          break;
6936  
6937      case TARGET_F_GETFL:
6938          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6939          if (ret >= 0) {
6940              ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6941          }
6942          break;
6943  
6944      case TARGET_F_SETFL:
6945          ret = get_errno(safe_fcntl(fd, host_cmd,
6946                                     target_to_host_bitmask(arg,
6947                                                            fcntl_flags_tbl)));
6948          break;
6949  
6950  #ifdef F_GETOWN_EX
6951      case TARGET_F_GETOWN_EX:
6952          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6953          if (ret >= 0) {
6954              if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6955                  return -TARGET_EFAULT;
6956              target_fox->type = tswap32(fox.type);
6957              target_fox->pid = tswap32(fox.pid);
6958              unlock_user_struct(target_fox, arg, 1);
6959          }
6960          break;
6961  #endif
6962  
6963  #ifdef F_SETOWN_EX
6964      case TARGET_F_SETOWN_EX:
6965          if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6966              return -TARGET_EFAULT;
6967          fox.type = tswap32(target_fox->type);
6968          fox.pid = tswap32(target_fox->pid);
6969          unlock_user_struct(target_fox, arg, 0);
6970          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6971          break;
6972  #endif
6973  
6974      case TARGET_F_SETSIG:
6975          ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6976          break;
6977  
6978      case TARGET_F_GETSIG:
6979          ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6980          break;
6981  
6982      case TARGET_F_SETOWN:
6983      case TARGET_F_GETOWN:
6984      case TARGET_F_SETLEASE:
6985      case TARGET_F_GETLEASE:
6986      case TARGET_F_SETPIPE_SZ:
6987      case TARGET_F_GETPIPE_SZ:
6988      case TARGET_F_ADD_SEALS:
6989      case TARGET_F_GET_SEALS:
6990          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6991          break;
6992  
6993      default:
6994          ret = get_errno(safe_fcntl(fd, cmd, arg));
6995          break;
6996      }
6997      return ret;
6998  }
6999  
7000  #ifdef USE_UID16
7001  
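      /*
       * For the 16-bit uid_t ABI, IDs that do not fit in 16 bits are reported
       * to the guest as the overflow ID 65534, and a 16-bit -1 must be
       * preserved on the way up because it means "leave unchanged" for the
       * set*id family of calls.
       */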
7002  static inline int high2lowuid(int uid)
7003  {
7004      if (uid > 65535)
7005          return 65534;
7006      else
7007          return uid;
7008  }
7009  
7010  static inline int high2lowgid(int gid)
7011  {
7012      if (gid > 65535)
7013          return 65534;
7014      else
7015          return gid;
7016  }
7017  
7018  static inline int low2highuid(int uid)
7019  {
7020      if ((int16_t)uid == -1)
7021          return -1;
7022      else
7023          return uid;
7024  }
7025  
7026  static inline int low2highgid(int gid)
7027  {
7028      if ((int16_t)gid == -1)
7029          return -1;
7030      else
7031          return gid;
7032  }
7033  static inline int tswapid(int id)
7034  {
7035      return tswap16(id);
7036  }
7037  
7038  #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7039  
7040  #else /* !USE_UID16 */
7041  static inline int high2lowuid(int uid)
7042  {
7043      return uid;
7044  }
7045  static inline int high2lowgid(int gid)
7046  {
7047      return gid;
7048  }
7049  static inline int low2highuid(int uid)
7050  {
7051      return uid;
7052  }
7053  static inline int low2highgid(int gid)
7054  {
7055      return gid;
7056  }
7057  static inline int tswapid(int id)
7058  {
7059      return tswap32(id);
7060  }
7061  
7062  #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7063  
7064  #endif /* USE_UID16 */
7065  
7066  /* We must do direct syscalls for setting UID/GID, because we want to
7067   * implement the Linux system call semantics of "change only for this thread",
7068   * not the libc/POSIX semantics of "change for all threads in process".
7069   * (See http://ewontfix.com/17/ for more details.)
7070   * We use the 32-bit version of the syscalls if present; if it is not
7071   * then either the host architecture supports 32-bit UIDs natively with
7072   * the standard syscall, or the 16-bit UID is the best we can do.
7073   */
7074  #ifdef __NR_setuid32
7075  #define __NR_sys_setuid __NR_setuid32
7076  #else
7077  #define __NR_sys_setuid __NR_setuid
7078  #endif
7079  #ifdef __NR_setgid32
7080  #define __NR_sys_setgid __NR_setgid32
7081  #else
7082  #define __NR_sys_setgid __NR_setgid
7083  #endif
7084  #ifdef __NR_setresuid32
7085  #define __NR_sys_setresuid __NR_setresuid32
7086  #else
7087  #define __NR_sys_setresuid __NR_setresuid
7088  #endif
7089  #ifdef __NR_setresgid32
7090  #define __NR_sys_setresgid __NR_setresgid32
7091  #else
7092  #define __NR_sys_setresgid __NR_setresgid
7093  #endif
7094  
7095  _syscall1(int, sys_setuid, uid_t, uid)
7096  _syscall1(int, sys_setgid, gid_t, gid)
7097  _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7098  _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7099  
7100  void syscall_init(void)
7101  {
7102      IOCTLEntry *ie;
7103      const argtype *arg_type;
7104      int size;
7105      int i;
7106  
7107      thunk_init(STRUCT_MAX);
7108  
7109  #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7110  #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7111  #include "syscall_types.h"
7112  #undef STRUCT
7113  #undef STRUCT_SPECIAL
7114  
7115      /* Build the target_to_host_errno_table[] from
7116       * host_to_target_errno_table[]. */
7117      for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7118          target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7119      }
7120  
7121      /* we patch the ioctl size if necessary. We rely on the fact that
7122         no ioctl has all the bits at '1' in the size field */
7123      ie = ioctl_entries;
7124      while (ie->target_cmd != 0) {
7125          if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7126              TARGET_IOC_SIZEMASK) {
7127              arg_type = ie->arg_type;
7128              if (arg_type[0] != TYPE_PTR) {
7129                  fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7130                          ie->target_cmd);
7131                  exit(1);
7132              }
7133              arg_type++;
7134              size = thunk_type_size(arg_type, 0);
7135              ie->target_cmd = (ie->target_cmd &
7136                                ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7137                  (size << TARGET_IOC_SIZESHIFT);
7138          }
7139  
7140          /* automatic consistency check if same arch */
7141  #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7142      (defined(__x86_64__) && defined(TARGET_X86_64))
7143          if (unlikely(ie->target_cmd != ie->host_cmd)) {
7144              fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7145                      ie->name, ie->target_cmd, ie->host_cmd);
7146          }
7147  #endif
7148          ie++;
7149      }
7150  }
7151  
7152  #ifdef TARGET_NR_truncate64
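      /* On 32-bit ABIs the 64-bit offset is passed as a register pair; some
       * targets require such pairs to start on an even register, in which case
       * regpairs_aligned() reports that a padding argument was inserted and the
       * pair actually lives in arg3/arg4. */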
7153  static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7154                                           abi_long arg2,
7155                                           abi_long arg3,
7156                                           abi_long arg4)
7157  {
7158      if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7159          arg2 = arg3;
7160          arg3 = arg4;
7161      }
7162      return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7163  }
7164  #endif
7165  
7166  #ifdef TARGET_NR_ftruncate64
7167  static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7168                                            abi_long arg2,
7169                                            abi_long arg3,
7170                                            abi_long arg4)
7171  {
7172      if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7173          arg2 = arg3;
7174          arg3 = arg4;
7175      }
7176      return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7177  }
7178  #endif
7179  
7180  #if defined(TARGET_NR_timer_settime) || \
7181      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7182  static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7183                                                   abi_ulong target_addr)
7184  {
7185      if (target_to_host_timespec(&host_its->it_interval, target_addr +
7186                                  offsetof(struct target_itimerspec,
7187                                           it_interval)) ||
7188          target_to_host_timespec(&host_its->it_value, target_addr +
7189                                  offsetof(struct target_itimerspec,
7190                                           it_value))) {
7191          return -TARGET_EFAULT;
7192      }
7193  
7194      return 0;
7195  }
7196  #endif
7197  
7198  #if defined(TARGET_NR_timer_settime64) || \
7199      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7200  static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7201                                                     abi_ulong target_addr)
7202  {
7203      if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7204                                    offsetof(struct target__kernel_itimerspec,
7205                                             it_interval)) ||
7206          target_to_host_timespec64(&host_its->it_value, target_addr +
7207                                    offsetof(struct target__kernel_itimerspec,
7208                                             it_value))) {
7209          return -TARGET_EFAULT;
7210      }
7211  
7212      return 0;
7213  }
7214  #endif
7215  
7216  #if ((defined(TARGET_NR_timerfd_gettime) || \
7217        defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7218        defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7219  static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7220                                                   struct itimerspec *host_its)
7221  {
7222      if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7223                                                         it_interval),
7224                                  &host_its->it_interval) ||
7225          host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7226                                                         it_value),
7227                                  &host_its->it_value)) {
7228          return -TARGET_EFAULT;
7229      }
7230      return 0;
7231  }
7232  #endif
7233  
7234  #if ((defined(TARGET_NR_timerfd_gettime64) || \
7235        defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7236        defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7237  static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7238                                                     struct itimerspec *host_its)
7239  {
7240      if (host_to_target_timespec64(target_addr +
7241                                    offsetof(struct target__kernel_itimerspec,
7242                                             it_interval),
7243                                    &host_its->it_interval) ||
7244          host_to_target_timespec64(target_addr +
7245                                    offsetof(struct target__kernel_itimerspec,
7246                                             it_value),
7247                                    &host_its->it_value)) {
7248          return -TARGET_EFAULT;
7249      }
7250      return 0;
7251  }
7252  #endif
7253  
7254  #if defined(TARGET_NR_adjtimex) || \
7255      (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7256  static inline abi_long target_to_host_timex(struct timex *host_tx,
7257                                              abi_long target_addr)
7258  {
7259      struct target_timex *target_tx;
7260  
7261      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7262          return -TARGET_EFAULT;
7263      }
7264  
7265      __get_user(host_tx->modes, &target_tx->modes);
7266      __get_user(host_tx->offset, &target_tx->offset);
7267      __get_user(host_tx->freq, &target_tx->freq);
7268      __get_user(host_tx->maxerror, &target_tx->maxerror);
7269      __get_user(host_tx->esterror, &target_tx->esterror);
7270      __get_user(host_tx->status, &target_tx->status);
7271      __get_user(host_tx->constant, &target_tx->constant);
7272      __get_user(host_tx->precision, &target_tx->precision);
7273      __get_user(host_tx->tolerance, &target_tx->tolerance);
7274      __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7275      __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7276      __get_user(host_tx->tick, &target_tx->tick);
7277      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7278      __get_user(host_tx->jitter, &target_tx->jitter);
7279      __get_user(host_tx->shift, &target_tx->shift);
7280      __get_user(host_tx->stabil, &target_tx->stabil);
7281      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7282      __get_user(host_tx->calcnt, &target_tx->calcnt);
7283      __get_user(host_tx->errcnt, &target_tx->errcnt);
7284      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7285      __get_user(host_tx->tai, &target_tx->tai);
7286  
7287      unlock_user_struct(target_tx, target_addr, 0);
7288      return 0;
7289  }
7290  
7291  static inline abi_long host_to_target_timex(abi_long target_addr,
7292                                              struct timex *host_tx)
7293  {
7294      struct target_timex *target_tx;
7295  
7296      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7297          return -TARGET_EFAULT;
7298      }
7299  
7300      __put_user(host_tx->modes, &target_tx->modes);
7301      __put_user(host_tx->offset, &target_tx->offset);
7302      __put_user(host_tx->freq, &target_tx->freq);
7303      __put_user(host_tx->maxerror, &target_tx->maxerror);
7304      __put_user(host_tx->esterror, &target_tx->esterror);
7305      __put_user(host_tx->status, &target_tx->status);
7306      __put_user(host_tx->constant, &target_tx->constant);
7307      __put_user(host_tx->precision, &target_tx->precision);
7308      __put_user(host_tx->tolerance, &target_tx->tolerance);
7309      __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7310      __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7311      __put_user(host_tx->tick, &target_tx->tick);
7312      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7313      __put_user(host_tx->jitter, &target_tx->jitter);
7314      __put_user(host_tx->shift, &target_tx->shift);
7315      __put_user(host_tx->stabil, &target_tx->stabil);
7316      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7317      __put_user(host_tx->calcnt, &target_tx->calcnt);
7318      __put_user(host_tx->errcnt, &target_tx->errcnt);
7319      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7320      __put_user(host_tx->tai, &target_tx->tai);
7321  
7322      unlock_user_struct(target_tx, target_addr, 1);
7323      return 0;
7324  }
7325  #endif
7326  
7327  
7328  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7329  static inline abi_long target_to_host_timex64(struct timex *host_tx,
7330                                                abi_long target_addr)
7331  {
7332      struct target__kernel_timex *target_tx;
7333  
7334      if (copy_from_user_timeval64(&host_tx->time, target_addr +
7335                                   offsetof(struct target__kernel_timex,
7336                                            time))) {
7337          return -TARGET_EFAULT;
7338      }
7339  
7340      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7341          return -TARGET_EFAULT;
7342      }
7343  
7344      __get_user(host_tx->modes, &target_tx->modes);
7345      __get_user(host_tx->offset, &target_tx->offset);
7346      __get_user(host_tx->freq, &target_tx->freq);
7347      __get_user(host_tx->maxerror, &target_tx->maxerror);
7348      __get_user(host_tx->esterror, &target_tx->esterror);
7349      __get_user(host_tx->status, &target_tx->status);
7350      __get_user(host_tx->constant, &target_tx->constant);
7351      __get_user(host_tx->precision, &target_tx->precision);
7352      __get_user(host_tx->tolerance, &target_tx->tolerance);
7353      __get_user(host_tx->tick, &target_tx->tick);
7354      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7355      __get_user(host_tx->jitter, &target_tx->jitter);
7356      __get_user(host_tx->shift, &target_tx->shift);
7357      __get_user(host_tx->stabil, &target_tx->stabil);
7358      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7359      __get_user(host_tx->calcnt, &target_tx->calcnt);
7360      __get_user(host_tx->errcnt, &target_tx->errcnt);
7361      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7362      __get_user(host_tx->tai, &target_tx->tai);
7363  
7364      unlock_user_struct(target_tx, target_addr, 0);
7365      return 0;
7366  }
7367  
7368  static inline abi_long host_to_target_timex64(abi_long target_addr,
7369                                                struct timex *host_tx)
7370  {
7371      struct target__kernel_timex *target_tx;
7372  
7373      if (copy_to_user_timeval64(target_addr +
7374                                 offsetof(struct target__kernel_timex, time),
7375                                 &host_tx->time)) {
7376          return -TARGET_EFAULT;
7377      }
7378  
7379      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7380          return -TARGET_EFAULT;
7381      }
7382  
7383      __put_user(host_tx->modes, &target_tx->modes);
7384      __put_user(host_tx->offset, &target_tx->offset);
7385      __put_user(host_tx->freq, &target_tx->freq);
7386      __put_user(host_tx->maxerror, &target_tx->maxerror);
7387      __put_user(host_tx->esterror, &target_tx->esterror);
7388      __put_user(host_tx->status, &target_tx->status);
7389      __put_user(host_tx->constant, &target_tx->constant);
7390      __put_user(host_tx->precision, &target_tx->precision);
7391      __put_user(host_tx->tolerance, &target_tx->tolerance);
7392      __put_user(host_tx->tick, &target_tx->tick);
7393      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7394      __put_user(host_tx->jitter, &target_tx->jitter);
7395      __put_user(host_tx->shift, &target_tx->shift);
7396      __put_user(host_tx->stabil, &target_tx->stabil);
7397      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7398      __put_user(host_tx->calcnt, &target_tx->calcnt);
7399      __put_user(host_tx->errcnt, &target_tx->errcnt);
7400      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7401      __put_user(host_tx->tai, &target_tx->tai);
7402  
7403      unlock_user_struct(target_tx, target_addr, 1);
7404      return 0;
7405  }
7406  #endif
7407  
7408  #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7409  #define sigev_notify_thread_id _sigev_un._tid
7410  #endif
7411  
7412  static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7413                                                 abi_ulong target_addr)
7414  {
7415      struct target_sigevent *target_sevp;
7416  
7417      if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7418          return -TARGET_EFAULT;
7419      }
7420  
7421      /* This union is awkward on 64 bit systems because it has a 32 bit
7422       * integer and a pointer in it; we follow the conversion approach
7423       * used for handling sigval types in signal.c so the guest should get
7424       * the correct value back even if we did a 64 bit byteswap and it's
7425       * using the 32 bit integer.
7426       */
7427      host_sevp->sigev_value.sival_ptr =
7428          (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7429      host_sevp->sigev_signo =
7430          target_to_host_signal(tswap32(target_sevp->sigev_signo));
7431      host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7432      host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7433  
7434      unlock_user_struct(target_sevp, target_addr, 1);
7435      return 0;
7436  }
7437  
7438  #if defined(TARGET_NR_mlockall)
7439  static inline int target_to_host_mlockall_arg(int arg)
7440  {
7441      int result = 0;
7442  
7443      if (arg & TARGET_MCL_CURRENT) {
7444          result |= MCL_CURRENT;
7445      }
7446      if (arg & TARGET_MCL_FUTURE) {
7447          result |= MCL_FUTURE;
7448      }
7449  #ifdef MCL_ONFAULT
7450      if (arg & TARGET_MCL_ONFAULT) {
7451          result |= MCL_ONFAULT;
7452      }
7453  #endif
7454  
7455      return result;
7456  }
7457  #endif
7458  
7459  #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7460       defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7461       defined(TARGET_NR_newfstatat))
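      /*
       * Fill a guest stat64 structure from a host struct stat.  32-bit ARM
       * EABI guests use their own target_eabi_stat64 layout, hence the
       * special case below.
       */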
7462  static inline abi_long host_to_target_stat64(void *cpu_env,
7463                                               abi_ulong target_addr,
7464                                               struct stat *host_st)
7465  {
7466  #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7467      if (((CPUARMState *)cpu_env)->eabi) {
7468          struct target_eabi_stat64 *target_st;
7469  
7470          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7471              return -TARGET_EFAULT;
7472          memset(target_st, 0, sizeof(struct target_eabi_stat64));
7473          __put_user(host_st->st_dev, &target_st->st_dev);
7474          __put_user(host_st->st_ino, &target_st->st_ino);
7475  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7476          __put_user(host_st->st_ino, &target_st->__st_ino);
7477  #endif
7478          __put_user(host_st->st_mode, &target_st->st_mode);
7479          __put_user(host_st->st_nlink, &target_st->st_nlink);
7480          __put_user(host_st->st_uid, &target_st->st_uid);
7481          __put_user(host_st->st_gid, &target_st->st_gid);
7482          __put_user(host_st->st_rdev, &target_st->st_rdev);
7483          __put_user(host_st->st_size, &target_st->st_size);
7484          __put_user(host_st->st_blksize, &target_st->st_blksize);
7485          __put_user(host_st->st_blocks, &target_st->st_blocks);
7486          __put_user(host_st->st_atime, &target_st->target_st_atime);
7487          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7488          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7489  #ifdef HAVE_STRUCT_STAT_ST_ATIM
7490          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7491          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7492          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7493  #endif
7494          unlock_user_struct(target_st, target_addr, 1);
7495      } else
7496  #endif
7497      {
7498  #if defined(TARGET_HAS_STRUCT_STAT64)
7499          struct target_stat64 *target_st;
7500  #else
7501          struct target_stat *target_st;
7502  #endif
7503  
7504          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7505              return -TARGET_EFAULT;
7506          memset(target_st, 0, sizeof(*target_st));
7507          __put_user(host_st->st_dev, &target_st->st_dev);
7508          __put_user(host_st->st_ino, &target_st->st_ino);
7509  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7510          __put_user(host_st->st_ino, &target_st->__st_ino);
7511  #endif
7512          __put_user(host_st->st_mode, &target_st->st_mode);
7513          __put_user(host_st->st_nlink, &target_st->st_nlink);
7514          __put_user(host_st->st_uid, &target_st->st_uid);
7515          __put_user(host_st->st_gid, &target_st->st_gid);
7516          __put_user(host_st->st_rdev, &target_st->st_rdev);
7517          /* XXX: better use of kernel struct */
7518          __put_user(host_st->st_size, &target_st->st_size);
7519          __put_user(host_st->st_blksize, &target_st->st_blksize);
7520          __put_user(host_st->st_blocks, &target_st->st_blocks);
7521          __put_user(host_st->st_atime, &target_st->target_st_atime);
7522          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7523          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7524  #ifdef HAVE_STRUCT_STAT_ST_ATIM
7525          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7526          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7527          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7528  #endif
7529          unlock_user_struct(target_st, target_addr, 1);
7530      }
7531  
7532      return 0;
7533  }
7534  #endif
7535  
7536  #if defined(TARGET_NR_statx) && defined(__NR_statx)
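      /*
       * Copy a statx result out to the guest.  The host result is already
       * held in a struct target_statx (whose layout follows the kernel's
       * struct statx), so only byte order needs adjusting here.
       */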
7537  static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7538                                              abi_ulong target_addr)
7539  {
7540      struct target_statx *target_stx;
7541  
7542      if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7543          return -TARGET_EFAULT;
7544      }
7545      memset(target_stx, 0, sizeof(*target_stx));
7546  
7547      __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7548      __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7549      __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7550      __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7551      __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7552      __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7553      __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7554      __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7555      __put_user(host_stx->stx_size, &target_stx->stx_size);
7556      __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7557      __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7558      __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7559      __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7560      __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7561      __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7562      __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7563      __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7564      __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7565      __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7566      __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7567      __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7568      __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7569      __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7570  
7571      unlock_user_struct(target_stx, target_addr, 1);
7572  
7573      return 0;
7574  }
7575  #endif
7576  
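      /*
       * Pick the right host futex syscall.  On 64-bit hosts __NR_futex
       * already takes a 64-bit timespec.  On 32-bit hosts, prefer
       * __NR_futex_time64 when the host timespec carries a 64-bit tv_sec,
       * and fall back to the old __NR_futex otherwise.  do_safe_futex()
       * below makes the same choice via the safe_syscall wrappers.
       */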
7577  static int do_sys_futex(int *uaddr, int op, int val,
7578                           const struct timespec *timeout, int *uaddr2,
7579                           int val3)
7580  {
7581  #if HOST_LONG_BITS == 64
7582  #if defined(__NR_futex)
7583      /* 64-bit hosts always have a 64-bit time_t; there is no _time64 variant. */
7584      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7585  
7586  #endif
7587  #else /* HOST_LONG_BITS == 64 */
7588  #if defined(__NR_futex_time64)
7589      if (sizeof(timeout->tv_sec) == 8) {
7590          /* _time64 function on 32bit arch */
7591          return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7592      }
7593  #endif
7594  #if defined(__NR_futex)
7595      /* old function on 32bit arch */
7596      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7597  #endif
7598  #endif /* HOST_LONG_BITS == 64 */
7599      g_assert_not_reached();
7600  }
7601  
7602  static int do_safe_futex(int *uaddr, int op, int val,
7603                           const struct timespec *timeout, int *uaddr2,
7604                           int val3)
7605  {
7606  #if HOST_LONG_BITS == 64
7607  #if defined(__NR_futex)
7608      /* 64-bit hosts always have a 64-bit time_t; there is no _time64 variant. */
7609      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7610  #endif
7611  #else /* HOST_LONG_BITS == 64 */
7612  #if defined(__NR_futex_time64)
7613      if (sizeof(timeout->tv_sec) == 8) {
7614          /* _time64 function on 32bit arch */
7615          return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7616                                             val3));
7617      }
7618  #endif
7619  #if defined(__NR_futex)
7620      /* old function on 32bit arch */
7621      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7622  #endif
7623  #endif /* HOST_LONG_BITS == 64 */
7624      return -TARGET_ENOSYS;
7625  }
7626  
7627  /* ??? Using host futex calls even when target atomic operations
7628     are not really atomic probably breaks things.  However, implementing
7629     futexes locally would make futexes shared between multiple processes
7630     tricky.  Then again, a local implementation is probably useless,
7631     because guest atomic operations won't work either.  */
7632  #if defined(TARGET_NR_futex)
7633  static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7634                      target_ulong timeout, target_ulong uaddr2, int val3)
7635  {
7636      struct timespec ts, *pts;
7637      int base_op;
7638  
7639      /* ??? We assume FUTEX_* constants are the same on both host
7640         and target.  */
7641  #ifdef FUTEX_CMD_MASK
7642      base_op = op & FUTEX_CMD_MASK;
7643  #else
7644      base_op = op;
7645  #endif
7646      switch (base_op) {
7647      case FUTEX_WAIT:
7648      case FUTEX_WAIT_BITSET:
7649          if (timeout) {
7650              pts = &ts;
7651              if (target_to_host_timespec(pts, timeout)) {
                      return -TARGET_EFAULT;
                  }
7652          } else {
7653              pts = NULL;
7654          }
7655          return do_safe_futex(g2h(cpu, uaddr),
7656                               op, tswap32(val), pts, NULL, val3);
7657      case FUTEX_WAKE:
7658          return do_safe_futex(g2h(cpu, uaddr),
7659                               op, val, NULL, NULL, 0);
7660      case FUTEX_FD:
7661          return do_safe_futex(g2h(cpu, uaddr),
7662                               op, val, NULL, NULL, 0);
7663      case FUTEX_REQUEUE:
7664      case FUTEX_CMP_REQUEUE:
7665      case FUTEX_WAKE_OP:
7666          /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7667             TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7668             But the prototype takes a `struct timespec *'; insert casts
7669             to satisfy the compiler.  We do not need to tswap TIMEOUT
7670             since it's not compared to guest memory.  */
7671          pts = (struct timespec *)(uintptr_t) timeout;
7672          return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7673                               (base_op == FUTEX_CMP_REQUEUE
7674                                ? tswap32(val3) : val3));
7675      default:
7676          return -TARGET_ENOSYS;
7677      }
7678  }
7679  #endif
7680  
7681  #if defined(TARGET_NR_futex_time64)
7682  static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7683                             int val, target_ulong timeout,
7684                             target_ulong uaddr2, int val3)
7685  {
7686      struct timespec ts, *pts;
7687      int base_op;
7688  
7689      /* ??? We assume FUTEX_* constants are the same on both host
7690         and target.  */
7691  #ifdef FUTEX_CMD_MASK
7692      base_op = op & FUTEX_CMD_MASK;
7693  #else
7694      base_op = op;
7695  #endif
7696      switch (base_op) {
7697      case FUTEX_WAIT:
7698      case FUTEX_WAIT_BITSET:
7699          if (timeout) {
7700              pts = &ts;
7701              if (target_to_host_timespec64(pts, timeout)) {
7702                  return -TARGET_EFAULT;
7703              }
7704          } else {
7705              pts = NULL;
7706          }
7707          return do_safe_futex(g2h(cpu, uaddr), op,
7708                               tswap32(val), pts, NULL, val3);
7709      case FUTEX_WAKE:
7710          return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7711      case FUTEX_FD:
7712          return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7713      case FUTEX_REQUEUE:
7714      case FUTEX_CMP_REQUEUE:
7715      case FUTEX_WAKE_OP:
7716          /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7717             TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7718             But the prototype takes a `struct timespec *'; insert casts
7719             to satisfy the compiler.  We do not need to tswap TIMEOUT
7720             since it's not compared to guest memory.  */
7721          pts = (struct timespec *)(uintptr_t) timeout;
7722          return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7723                               (base_op == FUTEX_CMP_REQUEUE
7724                                ? tswap32(val3) : val3));
7725      default:
7726          return -TARGET_ENOSYS;
7727      }
7728  }
7729  #endif
7730  
7731  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
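      /*
       * name_to_handle_at: read handle_bytes from the guest handle, let the
       * host fill in an opaque file_handle of that size, then copy it back
       * byteswapping only the handle_bytes and handle_type header fields,
       * and finally store the mount id.
       */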
7732  static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7733                                       abi_long handle, abi_long mount_id,
7734                                       abi_long flags)
7735  {
7736      struct file_handle *target_fh;
7737      struct file_handle *fh;
7738      int mid = 0;
7739      abi_long ret;
7740      char *name;
7741      unsigned int size, total_size;
7742  
7743      if (get_user_s32(size, handle)) {
7744          return -TARGET_EFAULT;
7745      }
7746  
7747      name = lock_user_string(pathname);
7748      if (!name) {
7749          return -TARGET_EFAULT;
7750      }
7751  
7752      total_size = sizeof(struct file_handle) + size;
7753      target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7754      if (!target_fh) {
7755          unlock_user(name, pathname, 0);
7756          return -TARGET_EFAULT;
7757      }
7758  
7759      fh = g_malloc0(total_size);
7760      fh->handle_bytes = size;
7761  
7762      ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7763      unlock_user(name, pathname, 0);
7764  
7765      /* man name_to_handle_at(2):
7766       * Other than the use of the handle_bytes field, the caller should treat
7767       * the file_handle structure as an opaque data type
7768       */
7769  
7770      memcpy(target_fh, fh, total_size);
7771      target_fh->handle_bytes = tswap32(fh->handle_bytes);
7772      target_fh->handle_type = tswap32(fh->handle_type);
7773      g_free(fh);
7774      unlock_user(target_fh, handle, total_size);
7775  
7776      if (put_user_s32(mid, mount_id)) {
7777          return -TARGET_EFAULT;
7778      }
7779  
7780      return ret;
7781  
7782  }
7783  #endif
7784  
7785  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
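      /*
       * open_by_handle_at: the guest-supplied handle is copied and its
       * header fields converted to host byte order before calling the host
       * syscall; open flags are translated via fcntl_flags_tbl.
       */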
7786  static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7787                                       abi_long flags)
7788  {
7789      struct file_handle *target_fh;
7790      struct file_handle *fh;
7791      unsigned int size, total_size;
7792      abi_long ret;
7793  
7794      if (get_user_s32(size, handle)) {
7795          return -TARGET_EFAULT;
7796      }
7797  
7798      total_size = sizeof(struct file_handle) + size;
7799      target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7800      if (!target_fh) {
7801          return -TARGET_EFAULT;
7802      }
7803  
7804      fh = g_memdup(target_fh, total_size);
7805      fh->handle_bytes = size;
7806      fh->handle_type = tswap32(target_fh->handle_type);
7807  
7808      ret = get_errno(open_by_handle_at(mount_fd, fh,
7809                      target_to_host_bitmask(flags, fcntl_flags_tbl)));
7810  
7811      g_free(fh);
7812  
7813      unlock_user(target_fh, handle, total_size);
7814  
7815      return ret;
7816  }
7817  #endif
7818  
7819  #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7820  
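      /*
       * Common helper for signalfd/signalfd4: convert the guest sigset and
       * flags, then register a translator so that signalfd_siginfo records
       * read back from the fd are converted to the guest layout.
       */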
7821  static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7822  {
7823      int host_flags;
7824      target_sigset_t *target_mask;
7825      sigset_t host_mask;
7826      abi_long ret;
7827  
7828      if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7829          return -TARGET_EINVAL;
7830      }
7831      if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7832          return -TARGET_EFAULT;
7833      }
7834  
7835      target_to_host_sigset(&host_mask, target_mask);
7836  
7837      host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7838  
7839      ret = get_errno(signalfd(fd, &host_mask, host_flags));
7840      if (ret >= 0) {
7841          fd_trans_register(ret, &target_signalfd_trans);
7842      }
7843  
7844      unlock_user_struct(target_mask, mask, 0);
7845  
7846      return ret;
7847  }
7848  #endif
7849  
7850  /* Map host to target signal numbers for the wait family of syscalls.
7851     Assume all other status bits are the same.  */
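      /* For example, a child stopped by a signal has the stop signal in
         bits 8-15 remapped to the target's numbering while the 0x7f
         marker in the low byte is preserved.  */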
7852  int host_to_target_waitstatus(int status)
7853  {
7854      if (WIFSIGNALED(status)) {
7855          return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7856      }
7857      if (WIFSTOPPED(status)) {
7858          return (host_to_target_signal(WSTOPSIG(status)) << 8)
7859                 | (status & 0xff);
7860      }
7861      return status;
7862  }
7863  
7864  static int open_self_cmdline(void *cpu_env, int fd)
7865  {
7866      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7867      struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7868      int i;
7869  
7870      for (i = 0; i < bprm->argc; i++) {
7871          size_t len = strlen(bprm->argv[i]) + 1;
7872  
7873          if (write(fd, bprm->argv[i], len) != len) {
7874              return -1;
7875          }
7876      }
7877  
7878      return 0;
7879  }
7880  
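      /*
       * Synthesize /proc/self/maps from QEMU's own view of guest memory:
       * host ranges are translated back to guest addresses, permissions are
       * taken from the page flags, the guest stack is labelled "[stack]",
       * and a vsyscall line is appended for targets that have one.
       */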
7881  static int open_self_maps(void *cpu_env, int fd)
7882  {
7883      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7884      TaskState *ts = cpu->opaque;
7885      GSList *map_info = read_self_maps();
7886      GSList *s;
7887      int count;
7888  
7889      for (s = map_info; s; s = g_slist_next(s)) {
7890          MapInfo *e = (MapInfo *) s->data;
7891  
7892          if (h2g_valid(e->start)) {
7893              unsigned long min = e->start;
7894              unsigned long max = e->end;
7895              int flags = page_get_flags(h2g(min));
7896              const char *path;
7897  
7898              max = h2g_valid(max - 1) ?
7899                  max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7900  
7901              if (page_check_range(h2g(min), max - min, flags) == -1) {
7902                  continue;
7903              }
7904  
7905              if (h2g(min) == ts->info->stack_limit) {
7906                  path = "[stack]";
7907              } else {
7908                  path = e->path;
7909              }
7910  
7911              count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7912                              " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7913                              h2g(min), h2g(max - 1) + 1,
7914                              (flags & PAGE_READ) ? 'r' : '-',
7915                              (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7916                              (flags & PAGE_EXEC) ? 'x' : '-',
7917                              e->is_priv ? 'p' : '-',
7918                              (uint64_t) e->offset, e->dev, e->inode);
7919              if (path) {
7920                  dprintf(fd, "%*s%s\n", 73 - count, "", path);
7921              } else {
7922                  dprintf(fd, "\n");
7923              }
7924          }
7925      }
7926  
7927      free_self_maps(map_info);
7928  
7929  #ifdef TARGET_VSYSCALL_PAGE
7930      /*
7931       * We only support execution from the vsyscall page.
7932       * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7933       */
7934      count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7935                      " --xp 00000000 00:00 0",
7936                      TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7937      dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7938  #endif
7939  
7940      return 0;
7941  }
7942  
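      /*
       * Synthesize a minimal /proc/self/stat: only the pid, comm, ppid and
       * stack start fields are filled in; every other field is reported as 0.
       */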
7943  static int open_self_stat(void *cpu_env, int fd)
7944  {
7945      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7946      TaskState *ts = cpu->opaque;
7947      g_autoptr(GString) buf = g_string_new(NULL);
7948      int i;
7949  
7950      for (i = 0; i < 44; i++) {
7951          if (i == 0) {
7952              /* pid */
7953              g_string_printf(buf, FMT_pid " ", getpid());
7954          } else if (i == 1) {
7955              /* app name */
7956              gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7957              bin = bin ? bin + 1 : ts->bprm->argv[0];
7958              g_string_printf(buf, "(%.15s) ", bin);
7959          } else if (i == 3) {
7960              /* ppid */
7961              g_string_printf(buf, FMT_pid " ", getppid());
7962          } else if (i == 27) {
7963              /* stack bottom */
7964              g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7965          } else {
7966              /* the remaining fields are not emulated; report them as 0 */
7967              g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7968          }
7969  
7970          if (write(fd, buf->str, buf->len) != buf->len) {
7971              return -1;
7972          }
7973      }
7974  
7975      return 0;
7976  }
7977  
7978  static int open_self_auxv(void *cpu_env, int fd)
7979  {
7980      CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7981      TaskState *ts = cpu->opaque;
7982      abi_ulong auxv = ts->info->saved_auxv;
7983      abi_ulong len = ts->info->auxv_len;
7984      char *ptr;
7985  
7986      /*
7987       * The auxiliary vector is stored on the target process's stack;
7988       * read the whole vector and copy it to the file.
7989       */
7990      ptr = lock_user(VERIFY_READ, auxv, len, 0);
7991      if (ptr != NULL) {
7992          while (len > 0) {
7993              ssize_t r;
7994              r = write(fd, ptr, len);
7995              if (r <= 0) {
7996                  break;
7997              }
7998              len -= r;
7999              ptr += r;
8000          }
8001          lseek(fd, 0, SEEK_SET);
8002          unlock_user(ptr, auxv, len);
8003      }
8004  
8005      return 0;
8006  }
8007  
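      /*
       * Return nonzero when filename refers to the given entry of our own
       * /proc directory, e.g. is_proc_myself("/proc/self/maps", "maps") or
       * the equivalent "/proc/<pid>/maps" path for the current pid.
       */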
8008  static int is_proc_myself(const char *filename, const char *entry)
8009  {
8010      if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8011          filename += strlen("/proc/");
8012          if (!strncmp(filename, "self/", strlen("self/"))) {
8013              filename += strlen("self/");
8014          } else if (*filename >= '1' && *filename <= '9') {
8015              char myself[80];
8016              snprintf(myself, sizeof(myself), "%d/", getpid());
8017              if (!strncmp(filename, myself, strlen(myself))) {
8018                  filename += strlen(myself);
8019              } else {
8020                  return 0;
8021              }
8022          } else {
8023              return 0;
8024          }
8025          if (!strcmp(filename, entry)) {
8026              return 1;
8027          }
8028      }
8029      return 0;
8030  }
8031  
8032  #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8033      defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8034  static int is_proc(const char *filename, const char *entry)
8035  {
8036      return strcmp(filename, entry) == 0;
8037  }
8038  #endif
8039  
8040  #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
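      /*
       * /proc/net/route prints addresses as hex words in host byte order;
       * when guest and host endianness differ, each route entry is rewritten
       * with the address fields byteswapped so the guest parses them in its
       * own native order.
       */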
8041  static int open_net_route(void *cpu_env, int fd)
8042  {
8043      FILE *fp;
8044      char *line = NULL;
8045      size_t len = 0;
8046      ssize_t read;
8047  
8048      fp = fopen("/proc/net/route", "r");
8049      if (fp == NULL) {
8050          return -1;
8051      }
8052  
8053      /* read header */
8054  
8055      read = getline(&line, &len, fp);
8056      if (read != -1) {
              dprintf(fd, "%s", line);
          }
8057  
8058      /* read routes */
8059  
8060      while ((read = getline(&line, &len, fp)) != -1) {
8061          char iface[16];
8062          uint32_t dest, gw, mask;
8063          unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8064          int fields;
8065  
8066          fields = sscanf(line,
8067                          "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8068                          iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8069                          &mask, &mtu, &window, &irtt);
8070          if (fields != 11) {
8071              continue;
8072          }
8073          dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8074                  iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8075                  metric, tswap32(mask), mtu, window, irtt);
8076      }
8077  
8078      free(line);
8079      fclose(fp);
8080  
8081      return 0;
8082  }
8083  #endif
8084  
8085  #if defined(TARGET_SPARC)
8086  static int open_cpuinfo(void *cpu_env, int fd)
8087  {
8088      dprintf(fd, "type\t\t: sun4u\n");
8089      return 0;
8090  }
8091  #endif
8092  
8093  #if defined(TARGET_HPPA)
8094  static int open_cpuinfo(void *cpu_env, int fd)
8095  {
8096      dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8097      dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8098      dprintf(fd, "capabilities\t: os32\n");
8099      dprintf(fd, "model\t\t: 9000/778/B160L\n");
8100      dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8101      return 0;
8102  }
8103  #endif
8104  
8105  #if defined(TARGET_M68K)
8106  static int open_hardware(void *cpu_env, int fd)
8107  {
8108      dprintf(fd, "Model:\t\tqemu-m68k\n");
8109      return 0;
8110  }
8111  #endif
8112  
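      /*
       * Intercept opens of emulated /proc entries: /proc/self/exe is
       * redirected to the real executable, entries in the table below are
       * synthesized into an unlinked temporary file whose descriptor is
       * returned, and everything else goes to safe_openat().
       */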
8113  static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8114  {
8115      struct fake_open {
8116          const char *filename;
8117          int (*fill)(void *cpu_env, int fd);
8118          int (*cmp)(const char *s1, const char *s2);
8119      };
8120      const struct fake_open *fake_open;
8121      static const struct fake_open fakes[] = {
8122          { "maps", open_self_maps, is_proc_myself },
8123          { "stat", open_self_stat, is_proc_myself },
8124          { "auxv", open_self_auxv, is_proc_myself },
8125          { "cmdline", open_self_cmdline, is_proc_myself },
8126  #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8127          { "/proc/net/route", open_net_route, is_proc },
8128  #endif
8129  #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8130          { "/proc/cpuinfo", open_cpuinfo, is_proc },
8131  #endif
8132  #if defined(TARGET_M68K)
8133          { "/proc/hardware", open_hardware, is_proc },
8134  #endif
8135          { NULL, NULL, NULL }
8136      };
8137  
8138      if (is_proc_myself(pathname, "exe")) {
8139          int execfd = qemu_getauxval(AT_EXECFD);
8140          return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8141      }
8142  
8143      for (fake_open = fakes; fake_open->filename; fake_open++) {
8144          if (fake_open->cmp(pathname, fake_open->filename)) {
8145              break;
8146          }
8147      }
8148  
8149      if (fake_open->filename) {
8150          const char *tmpdir;
8151          char filename[PATH_MAX];
8152          int fd, r;
8153  
8154          /* create a temporary file to hold the synthesized contents */
8155          tmpdir = getenv("TMPDIR");
8156          if (!tmpdir)
8157              tmpdir = "/tmp";
8158          snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8159          fd = mkstemp(filename);
8160          if (fd < 0) {
8161              return fd;
8162          }
8163          unlink(filename);
8164  
8165          if ((r = fake_open->fill(cpu_env, fd))) {
8166              int e = errno;
8167              close(fd);
8168              errno = e;
8169              return r;
8170          }
8171          lseek(fd, 0, SEEK_SET);
8172  
8173          return fd;
8174      }
8175  
8176      return safe_openat(dirfd, path(pathname), flags, mode);
8177  }
8178  
8179  #define TIMER_MAGIC 0x0caf0000
8180  #define TIMER_MAGIC_MASK 0xffff0000
8181  
8182  /* Convert QEMU provided timer ID back to internal 16bit index format */
8183  static target_timer_t get_timer_id(abi_long arg)
8184  {
8185      target_timer_t timerid = arg;
8186  
8187      if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8188          return -TARGET_EINVAL;
8189      }
8190  
8191      timerid &= 0xffff;
8192  
8193      if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8194          return -TARGET_EINVAL;
8195      }
8196  
8197      return timerid;
8198  }
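      /*
       * The guest-visible timer ID is the g_posix_timers index with
       * TIMER_MAGIC in the upper 16 bits, so e.g. get_timer_id(0x0caf0003)
       * yields index 3, while any value without the magic bits is rejected
       * with -TARGET_EINVAL.
       */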
8199  
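      /*
       * Repack a guest CPU affinity mask (an array of abi_ulong, in guest
       * byte order) into the host's array of unsigned long, bit by bit;
       * host_to_target_cpu_mask() below performs the reverse conversion.
       */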
8200  static int target_to_host_cpu_mask(unsigned long *host_mask,
8201                                     size_t host_size,
8202                                     abi_ulong target_addr,
8203                                     size_t target_size)
8204  {
8205      unsigned target_bits = sizeof(abi_ulong) * 8;
8206      unsigned host_bits = sizeof(*host_mask) * 8;
8207      abi_ulong *target_mask;
8208      unsigned i, j;
8209  
8210      assert(host_size >= target_size);
8211  
8212      target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8213      if (!target_mask) {
8214          return -TARGET_EFAULT;
8215      }
8216      memset(host_mask, 0, host_size);
8217  
8218      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8219          unsigned bit = i * target_bits;
8220          abi_ulong val;
8221  
8222          __get_user(val, &target_mask[i]);
8223          for (j = 0; j < target_bits; j++, bit++) {
8224              if (val & (1UL << j)) {
8225                  host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8226              }
8227          }
8228      }
8229  
8230      unlock_user(target_mask, target_addr, 0);
8231      return 0;
8232  }
8233  
8234  static int host_to_target_cpu_mask(const unsigned long *host_mask,
8235                                     size_t host_size,
8236                                     abi_ulong target_addr,
8237                                     size_t target_size)
8238  {
8239      unsigned target_bits = sizeof(abi_ulong) * 8;
8240      unsigned host_bits = sizeof(*host_mask) * 8;
8241      abi_ulong *target_mask;
8242      unsigned i, j;
8243  
8244      assert(host_size >= target_size);
8245  
8246      target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8247      if (!target_mask) {
8248          return -TARGET_EFAULT;
8249      }
8250  
8251      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8252          unsigned bit = i * target_bits;
8253          abi_ulong val = 0;
8254  
8255          for (j = 0; j < target_bits; j++, bit++) {
8256              if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8257                  val |= 1UL << j;
8258              }
8259          }
8260          __put_user(val, &target_mask[i]);
8261      }
8262  
8263      unlock_user(target_mask, target_addr, target_size);
8264      return 0;
8265  }
8266  
8267  #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8268  _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8269  #endif
8270  
8271  /* This is an internal helper for do_syscall so that it is easier
8272   * to have a single return point, so that actions, such as logging
8273   * of syscall results, can be performed.
8274   * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8275   */
8276  static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8277                              abi_long arg2, abi_long arg3, abi_long arg4,
8278                              abi_long arg5, abi_long arg6, abi_long arg7,
8279                              abi_long arg8)
8280  {
8281      CPUState *cpu = env_cpu(cpu_env);
8282      abi_long ret;
8283  #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8284      || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8285      || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8286      || defined(TARGET_NR_statx)
8287      struct stat st;
8288  #endif
8289  #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8290      || defined(TARGET_NR_fstatfs)
8291      struct statfs stfs;
8292  #endif
8293      void *p;
8294  
8295      switch(num) {
8296      case TARGET_NR_exit:
8297          /* In old applications this may be used to implement _exit(2).
8298             However in threaded applications it is used for thread termination,
8299             and _exit_group is used for application termination.
8300             Do thread termination if we have more than one thread.  */
8301  
8302          if (block_signals()) {
8303              return -TARGET_ERESTARTSYS;
8304          }
8305  
8306          pthread_mutex_lock(&clone_lock);
8307  
8308          if (CPU_NEXT(first_cpu)) {
8309              TaskState *ts = cpu->opaque;
8310  
8311              object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8312              object_unref(OBJECT(cpu));
8313              /*
8314               * At this point the CPU should be unrealized and removed
8315               * from cpu lists. We can clean-up the rest of the thread
8316               * data without the lock held.
8317               */
8318  
8319              pthread_mutex_unlock(&clone_lock);
8320  
8321              if (ts->child_tidptr) {
8322                  put_user_u32(0, ts->child_tidptr);
8323                  do_sys_futex(g2h(cpu, ts->child_tidptr),
8324                               FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8325              }
8326              thread_cpu = NULL;
8327              g_free(ts);
8328              rcu_unregister_thread();
8329              pthread_exit(NULL);
8330          }
8331  
8332          pthread_mutex_unlock(&clone_lock);
8333          preexit_cleanup(cpu_env, arg1);
8334          _exit(arg1);
8335          return 0; /* avoid warning */
8336      case TARGET_NR_read:
8337          if (arg2 == 0 && arg3 == 0) {
8338              return get_errno(safe_read(arg1, 0, 0));
8339          } else {
8340              if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8341                  return -TARGET_EFAULT;
8342              ret = get_errno(safe_read(arg1, p, arg3));
8343              if (ret >= 0 &&
8344                  fd_trans_host_to_target_data(arg1)) {
8345                  ret = fd_trans_host_to_target_data(arg1)(p, ret);
8346              }
8347              unlock_user(p, arg2, ret);
8348          }
8349          return ret;
8350      case TARGET_NR_write:
8351          if (arg2 == 0 && arg3 == 0) {
8352              return get_errno(safe_write(arg1, 0, 0));
8353          }
8354          if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8355              return -TARGET_EFAULT;
8356          if (fd_trans_target_to_host_data(arg1)) {
8357              void *copy = g_malloc(arg3);
8358              memcpy(copy, p, arg3);
8359              ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8360              if (ret >= 0) {
8361                  ret = get_errno(safe_write(arg1, copy, ret));
8362              }
8363              g_free(copy);
8364          } else {
8365              ret = get_errno(safe_write(arg1, p, arg3));
8366          }
8367          unlock_user(p, arg2, 0);
8368          return ret;
8369  
8370  #ifdef TARGET_NR_open
8371      case TARGET_NR_open:
8372          if (!(p = lock_user_string(arg1)))
8373              return -TARGET_EFAULT;
8374          ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8375                                    target_to_host_bitmask(arg2, fcntl_flags_tbl),
8376                                    arg3));
8377          fd_trans_unregister(ret);
8378          unlock_user(p, arg1, 0);
8379          return ret;
8380  #endif
8381      case TARGET_NR_openat:
8382          if (!(p = lock_user_string(arg2)))
8383              return -TARGET_EFAULT;
8384          ret = get_errno(do_openat(cpu_env, arg1, p,
8385                                    target_to_host_bitmask(arg3, fcntl_flags_tbl),
8386                                    arg4));
8387          fd_trans_unregister(ret);
8388          unlock_user(p, arg2, 0);
8389          return ret;
8390  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8391      case TARGET_NR_name_to_handle_at:
8392          ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8393          return ret;
8394  #endif
8395  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8396      case TARGET_NR_open_by_handle_at:
8397          ret = do_open_by_handle_at(arg1, arg2, arg3);
8398          fd_trans_unregister(ret);
8399          return ret;
8400  #endif
8401      case TARGET_NR_close:
8402          fd_trans_unregister(arg1);
8403          return get_errno(close(arg1));
8404  
8405      case TARGET_NR_brk:
8406          return do_brk(arg1);
8407  #ifdef TARGET_NR_fork
8408      case TARGET_NR_fork:
8409          return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8410  #endif
8411  #ifdef TARGET_NR_waitpid
8412      case TARGET_NR_waitpid:
8413          {
8414              int status;
8415              ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8416              if (!is_error(ret) && arg2 && ret
8417                  && put_user_s32(host_to_target_waitstatus(status), arg2))
8418                  return -TARGET_EFAULT;
8419          }
8420          return ret;
8421  #endif
8422  #ifdef TARGET_NR_waitid
8423      case TARGET_NR_waitid:
8424          {
8425              siginfo_t info;
8426              info.si_pid = 0;
8427              ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8428              if (!is_error(ret) && arg3 && info.si_pid != 0) {
8429                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8430                      return -TARGET_EFAULT;
8431                  host_to_target_siginfo(p, &info);
8432                  unlock_user(p, arg3, sizeof(target_siginfo_t));
8433              }
8434          }
8435          return ret;
8436  #endif
8437  #ifdef TARGET_NR_creat /* not on alpha */
8438      case TARGET_NR_creat:
8439          if (!(p = lock_user_string(arg1)))
8440              return -TARGET_EFAULT;
8441          ret = get_errno(creat(p, arg2));
8442          fd_trans_unregister(ret);
8443          unlock_user(p, arg1, 0);
8444          return ret;
8445  #endif
8446  #ifdef TARGET_NR_link
8447      case TARGET_NR_link:
8448          {
8449              void * p2;
8450              p = lock_user_string(arg1);
8451              p2 = lock_user_string(arg2);
8452              if (!p || !p2)
8453                  ret = -TARGET_EFAULT;
8454              else
8455                  ret = get_errno(link(p, p2));
8456              unlock_user(p2, arg2, 0);
8457              unlock_user(p, arg1, 0);
8458          }
8459          return ret;
8460  #endif
8461  #if defined(TARGET_NR_linkat)
8462      case TARGET_NR_linkat:
8463          {
8464              void * p2 = NULL;
8465              if (!arg2 || !arg4)
8466                  return -TARGET_EFAULT;
8467              p  = lock_user_string(arg2);
8468              p2 = lock_user_string(arg4);
8469              if (!p || !p2)
8470                  ret = -TARGET_EFAULT;
8471              else
8472                  ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8473              unlock_user(p, arg2, 0);
8474              unlock_user(p2, arg4, 0);
8475          }
8476          return ret;
8477  #endif
8478  #ifdef TARGET_NR_unlink
8479      case TARGET_NR_unlink:
8480          if (!(p = lock_user_string(arg1)))
8481              return -TARGET_EFAULT;
8482          ret = get_errno(unlink(p));
8483          unlock_user(p, arg1, 0);
8484          return ret;
8485  #endif
8486  #if defined(TARGET_NR_unlinkat)
8487      case TARGET_NR_unlinkat:
8488          if (!(p = lock_user_string(arg2)))
8489              return -TARGET_EFAULT;
8490          ret = get_errno(unlinkat(arg1, p, arg3));
8491          unlock_user(p, arg2, 0);
8492          return ret;
8493  #endif
8494      case TARGET_NR_execve:
8495          {
8496              char **argp, **envp;
8497              int argc, envc;
8498              abi_ulong gp;
8499              abi_ulong guest_argp;
8500              abi_ulong guest_envp;
8501              abi_ulong addr;
8502              char **q;
8503              int total_size = 0;
8504  
8505              argc = 0;
8506              guest_argp = arg2;
8507              for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8508                  if (get_user_ual(addr, gp))
8509                      return -TARGET_EFAULT;
8510                  if (!addr)
8511                      break;
8512                  argc++;
8513              }
8514              envc = 0;
8515              guest_envp = arg3;
8516              for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8517                  if (get_user_ual(addr, gp))
8518                      return -TARGET_EFAULT;
8519                  if (!addr)
8520                      break;
8521                  envc++;
8522              }
8523  
8524              argp = g_new0(char *, argc + 1);
8525              envp = g_new0(char *, envc + 1);
8526  
8527              for (gp = guest_argp, q = argp; gp;
8528                    gp += sizeof(abi_ulong), q++) {
8529                  if (get_user_ual(addr, gp))
8530                      goto execve_efault;
8531                  if (!addr)
8532                      break;
8533                  if (!(*q = lock_user_string(addr)))
8534                      goto execve_efault;
8535                  total_size += strlen(*q) + 1;
8536              }
8537              *q = NULL;
8538  
8539              for (gp = guest_envp, q = envp; gp;
8540                    gp += sizeof(abi_ulong), q++) {
8541                  if (get_user_ual(addr, gp))
8542                      goto execve_efault;
8543                  if (!addr)
8544                      break;
8545                  if (!(*q = lock_user_string(addr)))
8546                      goto execve_efault;
8547                  total_size += strlen(*q) + 1;
8548              }
8549              *q = NULL;
8550  
8551              if (!(p = lock_user_string(arg1)))
8552                  goto execve_efault;
8553              /* Although execve() is not an interruptible syscall it is
8554               * a special case where we must use the safe_syscall wrapper:
8555               * if we allow a signal to happen before we make the host
8556               * syscall then we will 'lose' it, because at the point of
8557               * execve the process leaves QEMU's control. So we use the
8558               * safe syscall wrapper to ensure that we either take the
8559               * signal as a guest signal, or else it does not happen
8560               * before the execve completes and makes it the other
8561               * program's problem.
8562               */
8563              ret = get_errno(safe_execve(p, argp, envp));
8564              unlock_user(p, arg1, 0);
8565  
8566              goto execve_end;
8567  
8568          execve_efault:
8569              ret = -TARGET_EFAULT;
8570  
8571          execve_end:
8572              for (gp = guest_argp, q = argp; *q;
8573                    gp += sizeof(abi_ulong), q++) {
8574                  if (get_user_ual(addr, gp)
8575                      || !addr)
8576                      break;
8577                  unlock_user(*q, addr, 0);
8578              }
8579              for (gp = guest_envp, q = envp; *q;
8580                    gp += sizeof(abi_ulong), q++) {
8581                  if (get_user_ual(addr, gp)
8582                      || !addr)
8583                      break;
8584                  unlock_user(*q, addr, 0);
8585              }
8586  
8587              g_free(argp);
8588              g_free(envp);
8589          }
8590          return ret;
8591      case TARGET_NR_chdir:
8592          if (!(p = lock_user_string(arg1)))
8593              return -TARGET_EFAULT;
8594          ret = get_errno(chdir(p));
8595          unlock_user(p, arg1, 0);
8596          return ret;
8597  #ifdef TARGET_NR_time
8598      case TARGET_NR_time:
8599          {
8600              time_t host_time;
8601              ret = get_errno(time(&host_time));
8602              if (!is_error(ret)
8603                  && arg1
8604                  && put_user_sal(host_time, arg1))
8605                  return -TARGET_EFAULT;
8606          }
8607          return ret;
8608  #endif
8609  #ifdef TARGET_NR_mknod
8610      case TARGET_NR_mknod:
8611          if (!(p = lock_user_string(arg1)))
8612              return -TARGET_EFAULT;
8613          ret = get_errno(mknod(p, arg2, arg3));
8614          unlock_user(p, arg1, 0);
8615          return ret;
8616  #endif
8617  #if defined(TARGET_NR_mknodat)
8618      case TARGET_NR_mknodat:
8619          if (!(p = lock_user_string(arg2)))
8620              return -TARGET_EFAULT;
8621          ret = get_errno(mknodat(arg1, p, arg3, arg4));
8622          unlock_user(p, arg2, 0);
8623          return ret;
8624  #endif
8625  #ifdef TARGET_NR_chmod
8626      case TARGET_NR_chmod:
8627          if (!(p = lock_user_string(arg1)))
8628              return -TARGET_EFAULT;
8629          ret = get_errno(chmod(p, arg2));
8630          unlock_user(p, arg1, 0);
8631          return ret;
8632  #endif
8633  #ifdef TARGET_NR_lseek
8634      case TARGET_NR_lseek:
8635          return get_errno(lseek(arg1, arg2, arg3));
8636  #endif
8637  #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8638      /* Alpha specific */
8639      case TARGET_NR_getxpid:
8640          ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8641          return get_errno(getpid());
8642  #endif
8643  #ifdef TARGET_NR_getpid
8644      case TARGET_NR_getpid:
8645          return get_errno(getpid());
8646  #endif
8647      case TARGET_NR_mount:
8648          {
8649              /* need to look at the data field */
8650              void *p2, *p3;
8651  
8652              if (arg1) {
8653                  p = lock_user_string(arg1);
8654                  if (!p) {
8655                      return -TARGET_EFAULT;
8656                  }
8657              } else {
8658                  p = NULL;
8659              }
8660  
8661              p2 = lock_user_string(arg2);
8662              if (!p2) {
8663                  if (arg1) {
8664                      unlock_user(p, arg1, 0);
8665                  }
8666                  return -TARGET_EFAULT;
8667              }
8668  
8669              if (arg3) {
8670                  p3 = lock_user_string(arg3);
8671                  if (!p3) {
8672                      if (arg1) {
8673                          unlock_user(p, arg1, 0);
8674                      }
8675                      unlock_user(p2, arg2, 0);
8676                      return -TARGET_EFAULT;
8677                  }
8678              } else {
8679                  p3 = NULL;
8680              }
8681  
8682              /* FIXME - arg5 should be locked, but it isn't clear how to
8683               * do that since it's not guaranteed to be a NULL-terminated
8684               * string.
8685               */
8686              if (!arg5) {
8687                  ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8688              } else {
8689                  ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8690              }
8691              ret = get_errno(ret);
8692  
8693              if (arg1) {
8694                  unlock_user(p, arg1, 0);
8695              }
8696              unlock_user(p2, arg2, 0);
8697              if (arg3) {
8698                  unlock_user(p3, arg3, 0);
8699              }
8700          }
8701          return ret;
8702  #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8703  #if defined(TARGET_NR_umount)
8704      case TARGET_NR_umount:
8705  #endif
8706  #if defined(TARGET_NR_oldumount)
8707      case TARGET_NR_oldumount:
8708  #endif
8709          if (!(p = lock_user_string(arg1)))
8710              return -TARGET_EFAULT;
8711          ret = get_errno(umount(p));
8712          unlock_user(p, arg1, 0);
8713          return ret;
8714  #endif
8715  #ifdef TARGET_NR_stime /* not on alpha */
8716      case TARGET_NR_stime:
8717          {
8718              struct timespec ts;
8719              ts.tv_nsec = 0;
8720              if (get_user_sal(ts.tv_sec, arg1)) {
8721                  return -TARGET_EFAULT;
8722              }
8723              return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8724          }
8725  #endif
8726  #ifdef TARGET_NR_alarm /* not on alpha */
8727      case TARGET_NR_alarm:
8728          return alarm(arg1);
8729  #endif
8730  #ifdef TARGET_NR_pause /* not on alpha */
8731      case TARGET_NR_pause:
8732          if (!block_signals()) {
8733              sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8734          }
8735          return -TARGET_EINTR;
8736  #endif
8737  #ifdef TARGET_NR_utime
8738      case TARGET_NR_utime:
8739          {
8740              struct utimbuf tbuf, *host_tbuf;
8741              struct target_utimbuf *target_tbuf;
8742              if (arg2) {
8743                  if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8744                      return -TARGET_EFAULT;
8745                  tbuf.actime = tswapal(target_tbuf->actime);
8746                  tbuf.modtime = tswapal(target_tbuf->modtime);
8747                  unlock_user_struct(target_tbuf, arg2, 0);
8748                  host_tbuf = &tbuf;
8749              } else {
8750                  host_tbuf = NULL;
8751              }
8752              if (!(p = lock_user_string(arg1)))
8753                  return -TARGET_EFAULT;
8754              ret = get_errno(utime(p, host_tbuf));
8755              unlock_user(p, arg1, 0);
8756          }
8757          return ret;
8758  #endif
8759  #ifdef TARGET_NR_utimes
8760      case TARGET_NR_utimes:
8761          {
8762              struct timeval *tvp, tv[2];
8763              if (arg2) {
8764                  if (copy_from_user_timeval(&tv[0], arg2)
8765                      || copy_from_user_timeval(&tv[1],
8766                                                arg2 + sizeof(struct target_timeval)))
8767                      return -TARGET_EFAULT;
8768                  tvp = tv;
8769              } else {
8770                  tvp = NULL;
8771              }
8772              if (!(p = lock_user_string(arg1)))
8773                  return -TARGET_EFAULT;
8774              ret = get_errno(utimes(p, tvp));
8775              unlock_user(p, arg1, 0);
8776          }
8777          return ret;
8778  #endif
8779  #if defined(TARGET_NR_futimesat)
8780      case TARGET_NR_futimesat:
8781          {
8782              struct timeval *tvp, tv[2];
8783              if (arg3) {
8784                  if (copy_from_user_timeval(&tv[0], arg3)
8785                      || copy_from_user_timeval(&tv[1],
8786                                                arg3 + sizeof(struct target_timeval)))
8787                      return -TARGET_EFAULT;
8788                  tvp = tv;
8789              } else {
8790                  tvp = NULL;
8791              }
8792              if (!(p = lock_user_string(arg2))) {
8793                  return -TARGET_EFAULT;
8794              }
8795              ret = get_errno(futimesat(arg1, path(p), tvp));
8796              unlock_user(p, arg2, 0);
8797          }
8798          return ret;
8799  #endif
8800  #ifdef TARGET_NR_access
8801      case TARGET_NR_access:
8802          if (!(p = lock_user_string(arg1))) {
8803              return -TARGET_EFAULT;
8804          }
8805          ret = get_errno(access(path(p), arg2));
8806          unlock_user(p, arg1, 0);
8807          return ret;
8808  #endif
8809  #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8810      case TARGET_NR_faccessat:
8811          if (!(p = lock_user_string(arg2))) {
8812              return -TARGET_EFAULT;
8813          }
8814          ret = get_errno(faccessat(arg1, p, arg3, 0));
8815          unlock_user(p, arg2, 0);
8816          return ret;
8817  #endif
8818  #ifdef TARGET_NR_nice /* not on alpha */
8819      case TARGET_NR_nice:
8820          return get_errno(nice(arg1));
8821  #endif
8822      case TARGET_NR_sync:
8823          sync();
8824          return 0;
8825  #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8826      case TARGET_NR_syncfs:
8827          return get_errno(syncfs(arg1));
8828  #endif
8829      case TARGET_NR_kill:
8830          return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8831  #ifdef TARGET_NR_rename
8832      case TARGET_NR_rename:
8833          {
8834              void *p2;
8835              p = lock_user_string(arg1);
8836              p2 = lock_user_string(arg2);
8837              if (!p || !p2)
8838                  ret = -TARGET_EFAULT;
8839              else
8840                  ret = get_errno(rename(p, p2));
8841              unlock_user(p2, arg2, 0);
8842              unlock_user(p, arg1, 0);
8843          }
8844          return ret;
8845  #endif
8846  #if defined(TARGET_NR_renameat)
8847      case TARGET_NR_renameat:
8848          {
8849              void *p2;
8850              p  = lock_user_string(arg2);
8851              p2 = lock_user_string(arg4);
8852              if (!p || !p2)
8853                  ret = -TARGET_EFAULT;
8854              else
8855                  ret = get_errno(renameat(arg1, p, arg3, p2));
8856              unlock_user(p2, arg4, 0);
8857              unlock_user(p, arg2, 0);
8858          }
8859          return ret;
8860  #endif
8861  #if defined(TARGET_NR_renameat2)
8862      case TARGET_NR_renameat2:
8863          {
8864              void *p2;
8865              p  = lock_user_string(arg2);
8866              p2 = lock_user_string(arg4);
8867              if (!p || !p2) {
8868                  ret = -TARGET_EFAULT;
8869              } else {
8870                  ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8871              }
8872              unlock_user(p2, arg4, 0);
8873              unlock_user(p, arg2, 0);
8874          }
8875          return ret;
8876  #endif
8877  #ifdef TARGET_NR_mkdir
8878      case TARGET_NR_mkdir:
8879          if (!(p = lock_user_string(arg1)))
8880              return -TARGET_EFAULT;
8881          ret = get_errno(mkdir(p, arg2));
8882          unlock_user(p, arg1, 0);
8883          return ret;
8884  #endif
8885  #if defined(TARGET_NR_mkdirat)
8886      case TARGET_NR_mkdirat:
8887          if (!(p = lock_user_string(arg2)))
8888              return -TARGET_EFAULT;
8889          ret = get_errno(mkdirat(arg1, p, arg3));
8890          unlock_user(p, arg2, 0);
8891          return ret;
8892  #endif
8893  #ifdef TARGET_NR_rmdir
8894      case TARGET_NR_rmdir:
8895          if (!(p = lock_user_string(arg1)))
8896              return -TARGET_EFAULT;
8897          ret = get_errno(rmdir(p));
8898          unlock_user(p, arg1, 0);
8899          return ret;
8900  #endif
8901      case TARGET_NR_dup:
8902          ret = get_errno(dup(arg1));
8903          if (ret >= 0) {
8904              fd_trans_dup(arg1, ret);
8905          }
8906          return ret;
8907  #ifdef TARGET_NR_pipe
8908      case TARGET_NR_pipe:
8909          return do_pipe(cpu_env, arg1, 0, 0);
8910  #endif
8911  #ifdef TARGET_NR_pipe2
8912      case TARGET_NR_pipe2:
8913          return do_pipe(cpu_env, arg1,
8914                         target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8915  #endif
8916      case TARGET_NR_times:
8917          {
8918              struct target_tms *tmsp;
8919              struct tms tms;
8920              ret = get_errno(times(&tms));
8921              if (arg1) {
8922                  tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8923                  if (!tmsp)
8924                      return -TARGET_EFAULT;
8925                  tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8926                  tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8927                  tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8928                  tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8929              }
8930              if (!is_error(ret))
8931                  ret = host_to_target_clock_t(ret);
8932          }
8933          return ret;
8934      case TARGET_NR_acct:
8935          if (arg1 == 0) {
8936              ret = get_errno(acct(NULL));
8937          } else {
8938              if (!(p = lock_user_string(arg1))) {
8939                  return -TARGET_EFAULT;
8940              }
8941              ret = get_errno(acct(path(p)));
8942              unlock_user(p, arg1, 0);
8943          }
8944          return ret;
8945  #ifdef TARGET_NR_umount2
8946      case TARGET_NR_umount2:
8947          if (!(p = lock_user_string(arg1)))
8948              return -TARGET_EFAULT;
8949          ret = get_errno(umount2(p, arg2));
8950          unlock_user(p, arg1, 0);
8951          return ret;
8952  #endif
8953      case TARGET_NR_ioctl:
8954          return do_ioctl(arg1, arg2, arg3);
8955  #ifdef TARGET_NR_fcntl
8956      case TARGET_NR_fcntl:
8957          return do_fcntl(arg1, arg2, arg3);
8958  #endif
8959      case TARGET_NR_setpgid:
8960          return get_errno(setpgid(arg1, arg2));
8961      case TARGET_NR_umask:
8962          return get_errno(umask(arg1));
8963      case TARGET_NR_chroot:
8964          if (!(p = lock_user_string(arg1)))
8965              return -TARGET_EFAULT;
8966          ret = get_errno(chroot(p));
8967          unlock_user(p, arg1, 0);
8968          return ret;
8969  #ifdef TARGET_NR_dup2
8970      case TARGET_NR_dup2:
8971          ret = get_errno(dup2(arg1, arg2));
8972          if (ret >= 0) {
8973              fd_trans_dup(arg1, arg2);
8974          }
8975          return ret;
8976  #endif
8977  #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8978      case TARGET_NR_dup3:
8979      {
8980          int host_flags;
8981  
8982          if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8983              return -TARGET_EINVAL;
8984          }
8985          host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8986          ret = get_errno(dup3(arg1, arg2, host_flags));
8987          if (ret >= 0) {
8988              fd_trans_dup(arg1, arg2);
8989          }
8990          return ret;
8991      }
8992  #endif
8993  #ifdef TARGET_NR_getppid /* not on alpha */
8994      case TARGET_NR_getppid:
8995          return get_errno(getppid());
8996  #endif
8997  #ifdef TARGET_NR_getpgrp
8998      case TARGET_NR_getpgrp:
8999          return get_errno(getpgrp());
9000  #endif
9001      case TARGET_NR_setsid:
9002          return get_errno(setsid());
9003  #ifdef TARGET_NR_sigaction
9004      case TARGET_NR_sigaction:
9005          {
9006  #if defined(TARGET_MIPS)
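                  /*
                   * MIPS's (old) sigaction uses the same struct target_sigaction
                   * layout as rt_sigaction, with a multi-word sa_mask: only the
                   * first mask word is used here and the remaining words are
                   * cleared when the old action is written back.
                   */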
9007              struct target_sigaction act, oact, *pact, *old_act;
9008
9009              if (arg2) {
9010                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9011                      return -TARGET_EFAULT;
9012                  act._sa_handler = old_act->_sa_handler;
9013                  target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9014                  act.sa_flags = old_act->sa_flags;
9015                  unlock_user_struct(old_act, arg2, 0);
9016                  pact = &act;
9017              } else {
9018                  pact = NULL;
9019              }
9020
9021              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9022
9023              if (!is_error(ret) && arg3) {
9024                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9025                      return -TARGET_EFAULT;
9026                  old_act->_sa_handler = oact._sa_handler;
9027                  old_act->sa_flags = oact.sa_flags;
9028                  old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9029                  old_act->sa_mask.sig[1] = 0;
9030                  old_act->sa_mask.sig[2] = 0;
9031                  old_act->sa_mask.sig[3] = 0;
9032                  unlock_user_struct(old_act, arg3, 1);
9033              }
9034  #else
9035              struct target_old_sigaction *old_act;
9036              struct target_sigaction act, oact, *pact;
9037              if (arg2) {
9038                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9039                      return -TARGET_EFAULT;
9040                  act._sa_handler = old_act->_sa_handler;
9041                  target_siginitset(&act.sa_mask, old_act->sa_mask);
9042                  act.sa_flags = old_act->sa_flags;
9043  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9044                  act.sa_restorer = old_act->sa_restorer;
9045  #endif
9046                  unlock_user_struct(old_act, arg2, 0);
9047                  pact = &act;
9048              } else {
9049                  pact = NULL;
9050              }
9051              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9052              if (!is_error(ret) && arg3) {
9053                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9054                      return -TARGET_EFAULT;
9055                  old_act->_sa_handler = oact._sa_handler;
9056                  old_act->sa_mask = oact.sa_mask.sig[0];
9057                  old_act->sa_flags = oact.sa_flags;
9058  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9059                  old_act->sa_restorer = oact.sa_restorer;
9060  #endif
9061                  unlock_user_struct(old_act, arg3, 1);
9062              }
9063  #endif
9064          }
9065          return ret;
9066  #endif
9067      case TARGET_NR_rt_sigaction:
9068          {
9069              /*
9070               * For Alpha and SPARC this is a 5 argument syscall, with
9071               * a 'restorer' parameter which must be copied into the
9072               * sa_restorer field of the sigaction struct.
9073               * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9074               * and arg5 is the sigsetsize.
9075               */
9076  #if defined(TARGET_ALPHA)
9077              target_ulong sigsetsize = arg4;
9078              target_ulong restorer = arg5;
9079  #elif defined(TARGET_SPARC)
9080              target_ulong restorer = arg4;
9081              target_ulong sigsetsize = arg5;
9082  #else
9083              target_ulong sigsetsize = arg4;
9084              target_ulong restorer = 0;
9085  #endif
9086              struct target_sigaction *act = NULL;
9087              struct target_sigaction *oact = NULL;
9088  
9089              if (sigsetsize != sizeof(target_sigset_t)) {
9090                  return -TARGET_EINVAL;
9091              }
9092              if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9093                  return -TARGET_EFAULT;
9094              }
9095              if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9096                  ret = -TARGET_EFAULT;
9097              } else {
9098                  ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9099                  if (oact) {
9100                      unlock_user_struct(oact, arg3, 1);
9101                  }
9102              }
9103              if (act) {
9104                  unlock_user_struct(act, arg2, 0);
9105              }
9106          }
9107          return ret;
9108  #ifdef TARGET_NR_sgetmask /* not on alpha */
9109      case TARGET_NR_sgetmask:
9110          {
9111              sigset_t cur_set;
9112              abi_ulong target_set;
9113              ret = do_sigprocmask(0, NULL, &cur_set);
9114              if (!ret) {
9115                  host_to_target_old_sigset(&target_set, &cur_set);
9116                  ret = target_set;
9117              }
9118          }
9119          return ret;
9120  #endif
9121  #ifdef TARGET_NR_ssetmask /* not on alpha */
9122      case TARGET_NR_ssetmask:
9123          {
9124              sigset_t set, oset;
9125              abi_ulong target_set = arg1;
9126              target_to_host_old_sigset(&set, &target_set);
9127              ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9128              if (!ret) {
9129                  host_to_target_old_sigset(&target_set, &oset);
9130                  ret = target_set;
9131              }
9132          }
9133          return ret;
9134  #endif
9135  #ifdef TARGET_NR_sigprocmask
9136      case TARGET_NR_sigprocmask:
9137          {
9138  #if defined(TARGET_ALPHA)
9139              sigset_t set, oldset;
9140              abi_ulong mask;
9141              int how;
9142  
9143              switch (arg1) {
9144              case TARGET_SIG_BLOCK:
9145                  how = SIG_BLOCK;
9146                  break;
9147              case TARGET_SIG_UNBLOCK:
9148                  how = SIG_UNBLOCK;
9149                  break;
9150              case TARGET_SIG_SETMASK:
9151                  how = SIG_SETMASK;
9152                  break;
9153              default:
9154                  return -TARGET_EINVAL;
9155              }
9156              mask = arg2;
9157              target_to_host_old_sigset(&set, &mask);
9158  
9159              ret = do_sigprocmask(how, &set, &oldset);
9160              if (!is_error(ret)) {
9161                  host_to_target_old_sigset(&mask, &oldset);
9162                  ret = mask;
9163                  ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9164              }
9165  #else
9166              sigset_t set, oldset, *set_ptr;
9167              int how;
9168  
9169              if (arg2) {
9170                  switch (arg1) {
9171                  case TARGET_SIG_BLOCK:
9172                      how = SIG_BLOCK;
9173                      break;
9174                  case TARGET_SIG_UNBLOCK:
9175                      how = SIG_UNBLOCK;
9176                      break;
9177                  case TARGET_SIG_SETMASK:
9178                      how = SIG_SETMASK;
9179                      break;
9180                  default:
9181                      return -TARGET_EINVAL;
9182                  }
9183                  if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9184                      return -TARGET_EFAULT;
9185                  target_to_host_old_sigset(&set, p);
9186                  unlock_user(p, arg2, 0);
9187                  set_ptr = &set;
9188              } else {
9189                  how = 0;
9190                  set_ptr = NULL;
9191              }
9192              ret = do_sigprocmask(how, set_ptr, &oldset);
9193              if (!is_error(ret) && arg3) {
9194                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9195                      return -TARGET_EFAULT;
9196                  host_to_target_old_sigset(p, &oldset);
9197                  unlock_user(p, arg3, sizeof(target_sigset_t));
9198              }
9199  #endif
9200          }
9201          return ret;
9202  #endif
9203      case TARGET_NR_rt_sigprocmask:
9204          {
9205              int how = arg1;
9206              sigset_t set, oldset, *set_ptr;
9207  
9208              if (arg4 != sizeof(target_sigset_t)) {
9209                  return -TARGET_EINVAL;
9210              }
9211  
9212              if (arg2) {
9213                  switch(how) {
9214                  case TARGET_SIG_BLOCK:
9215                      how = SIG_BLOCK;
9216                      break;
9217                  case TARGET_SIG_UNBLOCK:
9218                      how = SIG_UNBLOCK;
9219                      break;
9220                  case TARGET_SIG_SETMASK:
9221                      how = SIG_SETMASK;
9222                      break;
9223                  default:
9224                      return -TARGET_EINVAL;
9225                  }
9226                  if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9227                      return -TARGET_EFAULT;
9228                  target_to_host_sigset(&set, p);
9229                  unlock_user(p, arg2, 0);
9230                  set_ptr = &set;
9231              } else {
9232                  how = 0;
9233                  set_ptr = NULL;
9234              }
9235              ret = do_sigprocmask(how, set_ptr, &oldset);
9236              if (!is_error(ret) && arg3) {
9237                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9238                      return -TARGET_EFAULT;
9239                  host_to_target_sigset(p, &oldset);
9240                  unlock_user(p, arg3, sizeof(target_sigset_t));
9241              }
9242          }
9243          return ret;
9244  #ifdef TARGET_NR_sigpending
9245      case TARGET_NR_sigpending:
9246          {
9247              sigset_t set;
9248              ret = get_errno(sigpending(&set));
9249              if (!is_error(ret)) {
9250                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9251                      return -TARGET_EFAULT;
9252                  host_to_target_old_sigset(p, &set);
9253                  unlock_user(p, arg1, sizeof(target_sigset_t));
9254              }
9255          }
9256          return ret;
9257  #endif
9258      case TARGET_NR_rt_sigpending:
9259          {
9260              sigset_t set;
9261  
9262              /* Yes, this check is >, not != like most. We follow the
9263               * kernel's logic here: it implements NR_sigpending through
9264               * the same code path, and in that case the old_sigset_t is
9265               * smaller in size.
9266               */
9267              if (arg2 > sizeof(target_sigset_t)) {
9268                  return -TARGET_EINVAL;
9269              }
9270  
9271              ret = get_errno(sigpending(&set));
9272              if (!is_error(ret)) {
9273                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9274                      return -TARGET_EFAULT;
9275                  host_to_target_sigset(p, &set);
9276                  unlock_user(p, arg1, sizeof(target_sigset_t));
9277              }
9278          }
9279          return ret;
9280  #ifdef TARGET_NR_sigsuspend
9281      case TARGET_NR_sigsuspend:
9282          {
9283              TaskState *ts = cpu->opaque;
9284  #if defined(TARGET_ALPHA)
9285              abi_ulong mask = arg1;
9286              target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9287  #else
9288              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9289                  return -TARGET_EFAULT;
9290              target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9291              unlock_user(p, arg1, 0);
9292  #endif
9293              ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9294                                                 SIGSET_T_SIZE));
9295              if (ret != -TARGET_ERESTARTSYS) {
9296                  ts->in_sigsuspend = 1;
9297              }
9298          }
9299          return ret;
9300  #endif
9301      case TARGET_NR_rt_sigsuspend:
9302          {
9303              TaskState *ts = cpu->opaque;
9304  
9305              if (arg2 != sizeof(target_sigset_t)) {
9306                  return -TARGET_EINVAL;
9307              }
9308              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9309                  return -TARGET_EFAULT;
9310              target_to_host_sigset(&ts->sigsuspend_mask, p);
9311              unlock_user(p, arg1, 0);
9312              ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9313                                                 SIGSET_T_SIZE));
9314              if (ret != -TARGET_ERESTARTSYS) {
9315                  ts->in_sigsuspend = 1;
9316              }
9317          }
9318          return ret;
9319  #ifdef TARGET_NR_rt_sigtimedwait
9320      case TARGET_NR_rt_sigtimedwait:
9321          {
9322              sigset_t set;
9323              struct timespec uts, *puts;
9324              siginfo_t uinfo;
9325  
9326              if (arg4 != sizeof(target_sigset_t)) {
9327                  return -TARGET_EINVAL;
9328              }
9329  
9330              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9331                  return -TARGET_EFAULT;
9332              target_to_host_sigset(&set, p);
9333              unlock_user(p, arg1, 0);
9334              if (arg3) {
9335                  puts = &uts;
9336                  if (target_to_host_timespec(puts, arg3)) {
9337                      return -TARGET_EFAULT;
9338                  }
9339              } else {
9340                  puts = NULL;
9341              }
9342              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9343                                                   SIGSET_T_SIZE));
9344              if (!is_error(ret)) {
9345                  if (arg2) {
9346                      p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9347                                    0);
9348                      if (!p) {
9349                          return -TARGET_EFAULT;
9350                      }
9351                      host_to_target_siginfo(p, &uinfo);
9352                      unlock_user(p, arg2, sizeof(target_siginfo_t));
9353                  }
9354                  ret = host_to_target_signal(ret);
9355              }
9356          }
9357          return ret;
9358  #endif
9359  #ifdef TARGET_NR_rt_sigtimedwait_time64
9360      case TARGET_NR_rt_sigtimedwait_time64:
9361          {
9362              sigset_t set;
9363              struct timespec uts, *puts;
9364              siginfo_t uinfo;
9365  
9366              if (arg4 != sizeof(target_sigset_t)) {
9367                  return -TARGET_EINVAL;
9368              }
9369  
9370              p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9371              if (!p) {
9372                  return -TARGET_EFAULT;
9373              }
9374              target_to_host_sigset(&set, p);
9375              unlock_user(p, arg1, 0);
9376              if (arg3) {
9377                  puts = &uts;
9378                  if (target_to_host_timespec64(puts, arg3)) {
9379                      return -TARGET_EFAULT;
9380                  }
9381              } else {
9382                  puts = NULL;
9383              }
9384              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9385                                                   SIGSET_T_SIZE));
9386              if (!is_error(ret)) {
9387                  if (arg2) {
9388                      p = lock_user(VERIFY_WRITE, arg2,
9389                                    sizeof(target_siginfo_t), 0);
9390                      if (!p) {
9391                          return -TARGET_EFAULT;
9392                      }
9393                      host_to_target_siginfo(p, &uinfo);
9394                      unlock_user(p, arg2, sizeof(target_siginfo_t));
9395                  }
9396                  ret = host_to_target_signal(ret);
9397              }
9398          }
9399          return ret;
9400  #endif
9401      case TARGET_NR_rt_sigqueueinfo:
9402          {
9403              siginfo_t uinfo;
9404  
9405              p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9406              if (!p) {
9407                  return -TARGET_EFAULT;
9408              }
9409              target_to_host_siginfo(&uinfo, p);
9410              unlock_user(p, arg3, 0);
9411              ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9412          }
9413          return ret;
9414      case TARGET_NR_rt_tgsigqueueinfo:
9415          {
9416              siginfo_t uinfo;
9417  
9418              p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9419              if (!p) {
9420                  return -TARGET_EFAULT;
9421              }
9422              target_to_host_siginfo(&uinfo, p);
9423              unlock_user(p, arg4, 0);
9424              ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9425          }
9426          return ret;
9427  #ifdef TARGET_NR_sigreturn
9428      case TARGET_NR_sigreturn:
9429          if (block_signals()) {
9430              return -TARGET_ERESTARTSYS;
9431          }
9432          return do_sigreturn(cpu_env);
9433  #endif
9434      case TARGET_NR_rt_sigreturn:
9435          if (block_signals()) {
9436              return -TARGET_ERESTARTSYS;
9437          }
9438          return do_rt_sigreturn(cpu_env);
9439      case TARGET_NR_sethostname:
9440          if (!(p = lock_user_string(arg1)))
9441              return -TARGET_EFAULT;
9442          ret = get_errno(sethostname(p, arg2));
9443          unlock_user(p, arg1, 0);
9444          return ret;
9445  #ifdef TARGET_NR_setrlimit
9446      case TARGET_NR_setrlimit:
9447          {
9448              int resource = target_to_host_resource(arg1);
9449              struct target_rlimit *target_rlim;
9450              struct rlimit rlim;
9451              if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9452                  return -TARGET_EFAULT;
9453              rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9454              rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9455              unlock_user_struct(target_rlim, arg2, 0);
9456              /*
9457               * If we just passed through resource limit settings for memory then
9458               * they would also apply to QEMU's own allocations, and QEMU will
9459               * crash or hang or die if its allocations fail. Ideally we would
9460               * track the guest allocations in QEMU and apply the limits ourselves.
9461               * For now, just tell the guest the call succeeded but don't actually
9462               * limit anything.
9463               */
9464              if (resource != RLIMIT_AS &&
9465                  resource != RLIMIT_DATA &&
9466                  resource != RLIMIT_STACK) {
9467                  return get_errno(setrlimit(resource, &rlim));
9468              } else {
9469                  return 0;
9470              }
9471          }
9472  #endif
9473  #ifdef TARGET_NR_getrlimit
9474      case TARGET_NR_getrlimit:
9475          {
9476              int resource = target_to_host_resource(arg1);
9477              struct target_rlimit *target_rlim;
9478              struct rlimit rlim;
9479  
9480              ret = get_errno(getrlimit(resource, &rlim));
9481              if (!is_error(ret)) {
9482                  if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9483                      return -TARGET_EFAULT;
9484                  target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9485                  target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9486                  unlock_user_struct(target_rlim, arg2, 1);
9487              }
9488          }
9489          return ret;
9490  #endif
9491      case TARGET_NR_getrusage:
9492          {
9493              struct rusage rusage;
9494              ret = get_errno(getrusage(arg1, &rusage));
9495              if (!is_error(ret)) {
9496                  ret = host_to_target_rusage(arg2, &rusage);
9497              }
9498          }
9499          return ret;
9500  #if defined(TARGET_NR_gettimeofday)
9501      case TARGET_NR_gettimeofday:
9502          {
9503              struct timeval tv;
9504              struct timezone tz;
9505  
9506              ret = get_errno(gettimeofday(&tv, &tz));
9507              if (!is_error(ret)) {
9508                  if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9509                      return -TARGET_EFAULT;
9510                  }
9511                  if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9512                      return -TARGET_EFAULT;
9513                  }
9514              }
9515          }
9516          return ret;
9517  #endif
9518  #if defined(TARGET_NR_settimeofday)
9519      case TARGET_NR_settimeofday:
9520          {
9521              struct timeval tv, *ptv = NULL;
9522              struct timezone tz, *ptz = NULL;
9523  
9524              if (arg1) {
9525                  if (copy_from_user_timeval(&tv, arg1)) {
9526                      return -TARGET_EFAULT;
9527                  }
9528                  ptv = &tv;
9529              }
9530  
9531              if (arg2) {
9532                  if (copy_from_user_timezone(&tz, arg2)) {
9533                      return -TARGET_EFAULT;
9534                  }
9535                  ptz = &tz;
9536              }
9537  
9538              return get_errno(settimeofday(ptv, ptz));
9539          }
9540  #endif
9541  #if defined(TARGET_NR_select)
9542      case TARGET_NR_select:
9543  #if defined(TARGET_WANT_NI_OLD_SELECT)
9544          /* Some architectures used to have old_select here,
9545           * but it now returns ENOSYS.
9546           */
9547          ret = -TARGET_ENOSYS;
9548  #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9549          ret = do_old_select(arg1);
9550  #else
9551          ret = do_select(arg1, arg2, arg3, arg4, arg5);
9552  #endif
9553          return ret;
9554  #endif
9555  #ifdef TARGET_NR_pselect6
9556      case TARGET_NR_pselect6:
9557          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9558  #endif
9559  #ifdef TARGET_NR_pselect6_time64
9560      case TARGET_NR_pselect6_time64:
9561          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9562  #endif
9563  #ifdef TARGET_NR_symlink
9564      case TARGET_NR_symlink:
9565          {
9566              void *p2;
9567              p = lock_user_string(arg1);
9568              p2 = lock_user_string(arg2);
9569              if (!p || !p2)
9570                  ret = -TARGET_EFAULT;
9571              else
9572                  ret = get_errno(symlink(p, p2));
9573              unlock_user(p2, arg2, 0);
9574              unlock_user(p, arg1, 0);
9575          }
9576          return ret;
9577  #endif
9578  #if defined(TARGET_NR_symlinkat)
9579      case TARGET_NR_symlinkat:
9580          {
9581              void *p2;
9582              p  = lock_user_string(arg1);
9583              p2 = lock_user_string(arg3);
9584              if (!p || !p2)
9585                  ret = -TARGET_EFAULT;
9586              else
9587                  ret = get_errno(symlinkat(p, arg2, p2));
9588              unlock_user(p2, arg3, 0);
9589              unlock_user(p, arg1, 0);
9590          }
9591          return ret;
9592  #endif
9593  #ifdef TARGET_NR_readlink
9594      case TARGET_NR_readlink:
9595          {
9596              void *p2;
9597              p = lock_user_string(arg1);
9598              p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9599              if (!p || !p2) {
9600                  ret = -TARGET_EFAULT;
9601              } else if (!arg3) {
9602                  /* Short circuit this for the magic exe check. */
9603                  ret = -TARGET_EINVAL;
9604              } else if (is_proc_myself((const char *)p, "exe")) {
9605                  char real[PATH_MAX], *temp;
9606                  temp = realpath(exec_path, real);
9607                  /* Return value is # of bytes that we wrote to the buffer. */
9608                  if (temp == NULL) {
9609                      ret = get_errno(-1);
9610                  } else {
9611                      /* Don't worry about sign mismatch as earlier mapping
9612                       * logic would have thrown a bad address error. */
9613                      ret = MIN(strlen(real), arg3);
9614                      /* We cannot NUL terminate the string. */
9615                      memcpy(p2, real, ret);
9616                  }
9617              } else {
9618                  ret = get_errno(readlink(path(p), p2, arg3));
9619              }
9620              unlock_user(p2, arg2, ret);
9621              unlock_user(p, arg1, 0);
9622          }
9623          return ret;
9624  #endif
9625  #if defined(TARGET_NR_readlinkat)
9626      case TARGET_NR_readlinkat:
9627          {
9628              void *p2;
9629              p  = lock_user_string(arg2);
9630              p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9631              if (!p || !p2) {
9632                  ret = -TARGET_EFAULT;
9633              } else if (is_proc_myself((const char *)p, "exe")) {
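                      /* As with readlink above, report the emulated binary's
                       * resolved path for the magic exe check.
                       */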
9634                  char real[PATH_MAX], *temp;
9635                  temp = realpath(exec_path, real);
9636              ret = temp == NULL ? get_errno(-1) : strlen(real);
9637                  snprintf((char *)p2, arg4, "%s", real);
9638              } else {
9639                  ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9640              }
9641              unlock_user(p2, arg3, ret);
9642              unlock_user(p, arg2, 0);
9643          }
9644          return ret;
9645  #endif
9646  #ifdef TARGET_NR_swapon
9647      case TARGET_NR_swapon:
9648          if (!(p = lock_user_string(arg1)))
9649              return -TARGET_EFAULT;
9650          ret = get_errno(swapon(p, arg2));
9651          unlock_user(p, arg1, 0);
9652          return ret;
9653  #endif
9654      case TARGET_NR_reboot:
9655          if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9656              /* arg4 is only used for RESTART2; it is ignored otherwise. */
9657              p = lock_user_string(arg4);
9658              if (!p) {
9659                  return -TARGET_EFAULT;
9660              }
9661              ret = get_errno(reboot(arg1, arg2, arg3, p));
9662              unlock_user(p, arg4, 0);
9663          } else {
9664              ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9665          }
9666          return ret;
9667  #ifdef TARGET_NR_mmap
9668      case TARGET_NR_mmap:
9669  #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9670      (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9671      defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9672      || defined(TARGET_S390X)
9673          {
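                  /*
                   * On these targets the old mmap syscall passes a single guest
                   * pointer to a block of six arguments, which we unpack before
                   * calling target_mmap().
                   */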
9674              abi_ulong *v;
9675              abi_ulong v1, v2, v3, v4, v5, v6;
9676              if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9677                  return -TARGET_EFAULT;
9678              v1 = tswapal(v[0]);
9679              v2 = tswapal(v[1]);
9680              v3 = tswapal(v[2]);
9681              v4 = tswapal(v[3]);
9682              v5 = tswapal(v[4]);
9683              v6 = tswapal(v[5]);
9684              unlock_user(v, arg1, 0);
9685              ret = get_errno(target_mmap(v1, v2, v3,
9686                                          target_to_host_bitmask(v4, mmap_flags_tbl),
9687                                          v5, v6));
9688          }
9689  #else
9690          /* mmap pointers are always untagged */
9691          ret = get_errno(target_mmap(arg1, arg2, arg3,
9692                                      target_to_host_bitmask(arg4, mmap_flags_tbl),
9693                                      arg5,
9694                                      arg6));
9695  #endif
9696          return ret;
9697  #endif
9698  #ifdef TARGET_NR_mmap2
9699      case TARGET_NR_mmap2:
9700  #ifndef MMAP_SHIFT
9701  #define MMAP_SHIFT 12
9702  #endif
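              /* For mmap2 the file offset (arg6) is supplied in units of
               * (1 << MMAP_SHIFT)-byte pages rather than bytes.
               */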
9703          ret = target_mmap(arg1, arg2, arg3,
9704                            target_to_host_bitmask(arg4, mmap_flags_tbl),
9705                            arg5, arg6 << MMAP_SHIFT);
9706          return get_errno(ret);
9707  #endif
9708      case TARGET_NR_munmap:
9709          arg1 = cpu_untagged_addr(cpu, arg1);
9710          return get_errno(target_munmap(arg1, arg2));
9711      case TARGET_NR_mprotect:
9712          arg1 = cpu_untagged_addr(cpu, arg1);
9713          {
9714              TaskState *ts = cpu->opaque;
9715              /* Special hack to detect libc making the stack executable.  */
9716              if ((arg3 & PROT_GROWSDOWN)
9717                  && arg1 >= ts->info->stack_limit
9718                  && arg1 <= ts->info->start_stack) {
9719                  arg3 &= ~PROT_GROWSDOWN;
9720                  arg2 = arg2 + arg1 - ts->info->stack_limit;
9721                  arg1 = ts->info->stack_limit;
9722              }
9723          }
9724          return get_errno(target_mprotect(arg1, arg2, arg3));
9725  #ifdef TARGET_NR_mremap
9726      case TARGET_NR_mremap:
9727          arg1 = cpu_untagged_addr(cpu, arg1);
9728          /* mremap new_addr (arg5) is always untagged */
9729          return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9730  #endif
9731          /* ??? msync/mlock/munlock are broken for softmmu.  */
9732  #ifdef TARGET_NR_msync
9733      case TARGET_NR_msync:
9734          return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9735  #endif
9736  #ifdef TARGET_NR_mlock
9737      case TARGET_NR_mlock:
9738          return get_errno(mlock(g2h(cpu, arg1), arg2));
9739  #endif
9740  #ifdef TARGET_NR_munlock
9741      case TARGET_NR_munlock:
9742          return get_errno(munlock(g2h(cpu, arg1), arg2));
9743  #endif
9744  #ifdef TARGET_NR_mlockall
9745      case TARGET_NR_mlockall:
9746          return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9747  #endif
9748  #ifdef TARGET_NR_munlockall
9749      case TARGET_NR_munlockall:
9750          return get_errno(munlockall());
9751  #endif
9752  #ifdef TARGET_NR_truncate
9753      case TARGET_NR_truncate:
9754          if (!(p = lock_user_string(arg1)))
9755              return -TARGET_EFAULT;
9756          ret = get_errno(truncate(p, arg2));
9757          unlock_user(p, arg1, 0);
9758          return ret;
9759  #endif
9760  #ifdef TARGET_NR_ftruncate
9761      case TARGET_NR_ftruncate:
9762          return get_errno(ftruncate(arg1, arg2));
9763  #endif
9764      case TARGET_NR_fchmod:
9765          return get_errno(fchmod(arg1, arg2));
9766  #if defined(TARGET_NR_fchmodat)
9767      case TARGET_NR_fchmodat:
9768          if (!(p = lock_user_string(arg2)))
9769              return -TARGET_EFAULT;
9770          ret = get_errno(fchmodat(arg1, p, arg3, 0));
9771          unlock_user(p, arg2, 0);
9772          return ret;
9773  #endif
9774      case TARGET_NR_getpriority:
9775          /* Note that negative values are valid for getpriority, so we must
9776             differentiate based on errno settings.  */
9777          errno = 0;
9778          ret = getpriority(arg1, arg2);
9779          if (ret == -1 && errno != 0) {
9780              return -host_to_target_errno(errno);
9781          }
9782  #ifdef TARGET_ALPHA
9783          /* Return value is the unbiased priority.  Signal no error.  */
9784          ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9785  #else
9786          /* Return value is a biased priority to avoid negative numbers.  */
9787          ret = 20 - ret;
9788  #endif
9789          return ret;
9790      case TARGET_NR_setpriority:
9791          return get_errno(setpriority(arg1, arg2, arg3));
9792  #ifdef TARGET_NR_statfs
9793      case TARGET_NR_statfs:
9794          if (!(p = lock_user_string(arg1))) {
9795              return -TARGET_EFAULT;
9796          }
9797          ret = get_errno(statfs(path(p), &stfs));
9798          unlock_user(p, arg1, 0);
9799      convert_statfs:
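              /* Convert the host statfs result into the target layout
               * (also reached from TARGET_NR_fstatfs below).
               */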
9800          if (!is_error(ret)) {
9801              struct target_statfs *target_stfs;
9802  
9803              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9804                  return -TARGET_EFAULT;
9805              __put_user(stfs.f_type, &target_stfs->f_type);
9806              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9807              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9808              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9809              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9810              __put_user(stfs.f_files, &target_stfs->f_files);
9811              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9812              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9813              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9814              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9815              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9816  #ifdef _STATFS_F_FLAGS
9817              __put_user(stfs.f_flags, &target_stfs->f_flags);
9818  #else
9819              __put_user(0, &target_stfs->f_flags);
9820  #endif
9821              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9822              unlock_user_struct(target_stfs, arg2, 1);
9823          }
9824          return ret;
9825  #endif
9826  #ifdef TARGET_NR_fstatfs
9827      case TARGET_NR_fstatfs:
9828          ret = get_errno(fstatfs(arg1, &stfs));
9829          goto convert_statfs;
9830  #endif
9831  #ifdef TARGET_NR_statfs64
9832      case TARGET_NR_statfs64:
9833          if (!(p = lock_user_string(arg1))) {
9834              return -TARGET_EFAULT;
9835          }
9836          ret = get_errno(statfs(path(p), &stfs));
9837          unlock_user(p, arg1, 0);
9838      convert_statfs64:
9839          if (!is_error(ret)) {
9840              struct target_statfs64 *target_stfs;
9841  
9842              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9843                  return -TARGET_EFAULT;
9844              __put_user(stfs.f_type, &target_stfs->f_type);
9845              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9846              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9847              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9848              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9849              __put_user(stfs.f_files, &target_stfs->f_files);
9850              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9851              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9852              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9853              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9854              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9855  #ifdef _STATFS_F_FLAGS
9856              __put_user(stfs.f_flags, &target_stfs->f_flags);
9857  #else
9858              __put_user(0, &target_stfs->f_flags);
9859  #endif
9860              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9861              unlock_user_struct(target_stfs, arg3, 1);
9862          }
9863          return ret;
9864      case TARGET_NR_fstatfs64:
9865          ret = get_errno(fstatfs(arg1, &stfs));
9866          goto convert_statfs64;
9867  #endif
9868  #ifdef TARGET_NR_socketcall
9869      case TARGET_NR_socketcall:
9870          return do_socketcall(arg1, arg2);
9871  #endif
9872  #ifdef TARGET_NR_accept
9873      case TARGET_NR_accept:
9874          return do_accept4(arg1, arg2, arg3, 0);
9875  #endif
9876  #ifdef TARGET_NR_accept4
9877      case TARGET_NR_accept4:
9878          return do_accept4(arg1, arg2, arg3, arg4);
9879  #endif
9880  #ifdef TARGET_NR_bind
9881      case TARGET_NR_bind:
9882          return do_bind(arg1, arg2, arg3);
9883  #endif
9884  #ifdef TARGET_NR_connect
9885      case TARGET_NR_connect:
9886          return do_connect(arg1, arg2, arg3);
9887  #endif
9888  #ifdef TARGET_NR_getpeername
9889      case TARGET_NR_getpeername:
9890          return do_getpeername(arg1, arg2, arg3);
9891  #endif
9892  #ifdef TARGET_NR_getsockname
9893      case TARGET_NR_getsockname:
9894          return do_getsockname(arg1, arg2, arg3);
9895  #endif
9896  #ifdef TARGET_NR_getsockopt
9897      case TARGET_NR_getsockopt:
9898          return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9899  #endif
9900  #ifdef TARGET_NR_listen
9901      case TARGET_NR_listen:
9902          return get_errno(listen(arg1, arg2));
9903  #endif
9904  #ifdef TARGET_NR_recv
9905      case TARGET_NR_recv:
9906          return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9907  #endif
9908  #ifdef TARGET_NR_recvfrom
9909      case TARGET_NR_recvfrom:
9910          return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9911  #endif
9912  #ifdef TARGET_NR_recvmsg
9913      case TARGET_NR_recvmsg:
9914          return do_sendrecvmsg(arg1, arg2, arg3, 0);
9915  #endif
9916  #ifdef TARGET_NR_send
9917      case TARGET_NR_send:
9918          return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9919  #endif
9920  #ifdef TARGET_NR_sendmsg
9921      case TARGET_NR_sendmsg:
9922          return do_sendrecvmsg(arg1, arg2, arg3, 1);
9923  #endif
9924  #ifdef TARGET_NR_sendmmsg
9925      case TARGET_NR_sendmmsg:
9926          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9927  #endif
9928  #ifdef TARGET_NR_recvmmsg
9929      case TARGET_NR_recvmmsg:
9930          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9931  #endif
9932  #ifdef TARGET_NR_sendto
9933      case TARGET_NR_sendto:
9934          return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9935  #endif
9936  #ifdef TARGET_NR_shutdown
9937      case TARGET_NR_shutdown:
9938          return get_errno(shutdown(arg1, arg2));
9939  #endif
9940  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9941      case TARGET_NR_getrandom:
9942          p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9943          if (!p) {
9944              return -TARGET_EFAULT;
9945          }
9946          ret = get_errno(getrandom(p, arg2, arg3));
9947          unlock_user(p, arg1, ret);
9948          return ret;
9949  #endif
9950  #ifdef TARGET_NR_socket
9951      case TARGET_NR_socket:
9952          return do_socket(arg1, arg2, arg3);
9953  #endif
9954  #ifdef TARGET_NR_socketpair
9955      case TARGET_NR_socketpair:
9956          return do_socketpair(arg1, arg2, arg3, arg4);
9957  #endif
9958  #ifdef TARGET_NR_setsockopt
9959      case TARGET_NR_setsockopt:
9960          return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9961  #endif
9962  #if defined(TARGET_NR_syslog)
9963      case TARGET_NR_syslog:
9964          {
9965              int len = arg3;
9966  
9967              switch (arg1) {
9968              case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9969              case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9970              case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9971              case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9972              case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9973              case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9974              case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9975              case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9976                  return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9977              case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9978              case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9979              case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9980                  {
9981                      if (len < 0) {
9982                          return -TARGET_EINVAL;
9983                      }
9984                      if (len == 0) {
9985                          return 0;
9986                      }
9987                      p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9988                      if (!p) {
9989                          return -TARGET_EFAULT;
9990                      }
9991                      ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9992                      unlock_user(p, arg2, arg3);
9993                  }
9994                  return ret;
9995              default:
9996                  return -TARGET_EINVAL;
9997              }
9998          }
9999          break;
10000  #endif
10001      case TARGET_NR_setitimer:
10002          {
10003              struct itimerval value, ovalue, *pvalue;
10004  
10005              if (arg2) {
10006                  pvalue = &value;
10007                  if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10008                      || copy_from_user_timeval(&pvalue->it_value,
10009                                                arg2 + sizeof(struct target_timeval)))
10010                      return -TARGET_EFAULT;
10011              } else {
10012                  pvalue = NULL;
10013              }
10014              ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10015              if (!is_error(ret) && arg3) {
10016                  if (copy_to_user_timeval(arg3,
10017                                           &ovalue.it_interval)
10018                      || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10019                                              &ovalue.it_value))
10020                      return -TARGET_EFAULT;
10021              }
10022          }
10023          return ret;
10024      case TARGET_NR_getitimer:
10025          {
10026              struct itimerval value;
10027  
10028              ret = get_errno(getitimer(arg1, &value));
10029              if (!is_error(ret) && arg2) {
10030                  if (copy_to_user_timeval(arg2,
10031                                           &value.it_interval)
10032                      || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10033                                              &value.it_value))
10034                      return -TARGET_EFAULT;
10035              }
10036          }
10037          return ret;
10038  #ifdef TARGET_NR_stat
10039      case TARGET_NR_stat:
10040          if (!(p = lock_user_string(arg1))) {
10041              return -TARGET_EFAULT;
10042          }
10043          ret = get_errno(stat(path(p), &st));
10044          unlock_user(p, arg1, 0);
10045          goto do_stat;
10046  #endif
10047  #ifdef TARGET_NR_lstat
10048      case TARGET_NR_lstat:
10049          if (!(p = lock_user_string(arg1))) {
10050              return -TARGET_EFAULT;
10051          }
10052          ret = get_errno(lstat(path(p), &st));
10053          unlock_user(p, arg1, 0);
10054          goto do_stat;
10055  #endif
10056  #ifdef TARGET_NR_fstat
10057      case TARGET_NR_fstat:
10058          {
10059              ret = get_errno(fstat(arg1, &st));
10060  #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10061          do_stat:
10062  #endif
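                   /* Convert the host struct stat into the target layout
                    * (also reached from TARGET_NR_stat / TARGET_NR_lstat via
                    * the do_stat label).
                    */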
10063              if (!is_error(ret)) {
10064                  struct target_stat *target_st;
10065  
10066                  if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10067                      return -TARGET_EFAULT;
10068                  memset(target_st, 0, sizeof(*target_st));
10069                  __put_user(st.st_dev, &target_st->st_dev);
10070                  __put_user(st.st_ino, &target_st->st_ino);
10071                  __put_user(st.st_mode, &target_st->st_mode);
10072                  __put_user(st.st_uid, &target_st->st_uid);
10073                  __put_user(st.st_gid, &target_st->st_gid);
10074                  __put_user(st.st_nlink, &target_st->st_nlink);
10075                  __put_user(st.st_rdev, &target_st->st_rdev);
10076                  __put_user(st.st_size, &target_st->st_size);
10077                  __put_user(st.st_blksize, &target_st->st_blksize);
10078                  __put_user(st.st_blocks, &target_st->st_blocks);
10079                  __put_user(st.st_atime, &target_st->target_st_atime);
10080                  __put_user(st.st_mtime, &target_st->target_st_mtime);
10081                  __put_user(st.st_ctime, &target_st->target_st_ctime);
10082  #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10083                  __put_user(st.st_atim.tv_nsec,
10084                             &target_st->target_st_atime_nsec);
10085                  __put_user(st.st_mtim.tv_nsec,
10086                             &target_st->target_st_mtime_nsec);
10087                  __put_user(st.st_ctim.tv_nsec,
10088                             &target_st->target_st_ctime_nsec);
10089  #endif
10090                  unlock_user_struct(target_st, arg2, 1);
10091              }
10092          }
10093          return ret;
10094  #endif
10095      case TARGET_NR_vhangup:
10096          return get_errno(vhangup());
10097  #ifdef TARGET_NR_syscall
10098      case TARGET_NR_syscall:
10099          return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10100                            arg6, arg7, arg8, 0);
10101  #endif
10102  #if defined(TARGET_NR_wait4)
10103      case TARGET_NR_wait4:
10104          {
10105              int status;
10106              abi_long status_ptr = arg2;
10107              struct rusage rusage, *rusage_ptr;
10108              abi_ulong target_rusage = arg4;
10109              abi_long rusage_err;
10110              if (target_rusage)
10111                  rusage_ptr = &rusage;
10112              else
10113                  rusage_ptr = NULL;
10114              ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10115              if (!is_error(ret)) {
10116                  if (status_ptr && ret) {
10117                      status = host_to_target_waitstatus(status);
10118                      if (put_user_s32(status, status_ptr))
10119                          return -TARGET_EFAULT;
10120                  }
10121                  if (target_rusage) {
10122                      rusage_err = host_to_target_rusage(target_rusage, &rusage);
10123                      if (rusage_err) {
10124                          ret = rusage_err;
10125                      }
10126                  }
10127              }
10128          }
10129          return ret;
10130  #endif
10131  #ifdef TARGET_NR_swapoff
10132      case TARGET_NR_swapoff:
10133          if (!(p = lock_user_string(arg1)))
10134              return -TARGET_EFAULT;
10135          ret = get_errno(swapoff(p));
10136          unlock_user(p, arg1, 0);
10137          return ret;
10138  #endif
10139      case TARGET_NR_sysinfo:
10140          {
10141              struct target_sysinfo *target_value;
10142              struct sysinfo value;
10143              ret = get_errno(sysinfo(&value));
10144              if (!is_error(ret) && arg1)
10145              {
10146                  if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10147                      return -TARGET_EFAULT;
10148                  __put_user(value.uptime, &target_value->uptime);
10149                  __put_user(value.loads[0], &target_value->loads[0]);
10150                  __put_user(value.loads[1], &target_value->loads[1]);
10151                  __put_user(value.loads[2], &target_value->loads[2]);
10152                  __put_user(value.totalram, &target_value->totalram);
10153                  __put_user(value.freeram, &target_value->freeram);
10154                  __put_user(value.sharedram, &target_value->sharedram);
10155                  __put_user(value.bufferram, &target_value->bufferram);
10156                  __put_user(value.totalswap, &target_value->totalswap);
10157                  __put_user(value.freeswap, &target_value->freeswap);
10158                  __put_user(value.procs, &target_value->procs);
10159                  __put_user(value.totalhigh, &target_value->totalhigh);
10160                  __put_user(value.freehigh, &target_value->freehigh);
10161                  __put_user(value.mem_unit, &target_value->mem_unit);
10162                  unlock_user_struct(target_value, arg1, 1);
10163              }
10164          }
10165          return ret;
10166  #ifdef TARGET_NR_ipc
10167      case TARGET_NR_ipc:
10168          return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10169  #endif
10170  #ifdef TARGET_NR_semget
10171      case TARGET_NR_semget:
10172          return get_errno(semget(arg1, arg2, arg3));
10173  #endif
10174  #ifdef TARGET_NR_semop
10175      case TARGET_NR_semop:
10176          return do_semtimedop(arg1, arg2, arg3, 0, false);
10177  #endif
10178  #ifdef TARGET_NR_semtimedop
10179      case TARGET_NR_semtimedop:
10180          return do_semtimedop(arg1, arg2, arg3, arg4, false);
10181  #endif
10182  #ifdef TARGET_NR_semtimedop_time64
10183      case TARGET_NR_semtimedop_time64:
10184          return do_semtimedop(arg1, arg2, arg3, arg4, true);
10185  #endif
10186  #ifdef TARGET_NR_semctl
10187      case TARGET_NR_semctl:
10188          return do_semctl(arg1, arg2, arg3, arg4);
10189  #endif
10190  #ifdef TARGET_NR_msgctl
10191      case TARGET_NR_msgctl:
10192          return do_msgctl(arg1, arg2, arg3);
10193  #endif
10194  #ifdef TARGET_NR_msgget
10195      case TARGET_NR_msgget:
10196          return get_errno(msgget(arg1, arg2));
10197  #endif
10198  #ifdef TARGET_NR_msgrcv
10199      case TARGET_NR_msgrcv:
10200          return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10201  #endif
10202  #ifdef TARGET_NR_msgsnd
10203      case TARGET_NR_msgsnd:
10204          return do_msgsnd(arg1, arg2, arg3, arg4);
10205  #endif
10206  #ifdef TARGET_NR_shmget
10207      case TARGET_NR_shmget:
10208          return get_errno(shmget(arg1, arg2, arg3));
10209  #endif
10210  #ifdef TARGET_NR_shmctl
10211      case TARGET_NR_shmctl:
10212          return do_shmctl(arg1, arg2, arg3);
10213  #endif
10214  #ifdef TARGET_NR_shmat
10215      case TARGET_NR_shmat:
10216          return do_shmat(cpu_env, arg1, arg2, arg3);
10217  #endif
10218  #ifdef TARGET_NR_shmdt
10219      case TARGET_NR_shmdt:
10220          return do_shmdt(arg1);
10221  #endif
10222      case TARGET_NR_fsync:
10223          return get_errno(fsync(arg1));
10224      case TARGET_NR_clone:
10225          /* Linux manages to have three different orderings for its
10226           * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10227           * match the kernel's CONFIG_CLONE_* settings.
10228           * Microblaze is further special in that it uses a sixth
10229           * implicit argument to clone for the TLS pointer.
10230           */
10231  #if defined(TARGET_MICROBLAZE)
10232          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10233  #elif defined(TARGET_CLONE_BACKWARDS)
10234          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10235  #elif defined(TARGET_CLONE_BACKWARDS2)
10236          ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10237  #else
10238          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10239  #endif
10240          return ret;
10241  #ifdef __NR_exit_group
10242          /* new thread calls */
10243      case TARGET_NR_exit_group:
10244          preexit_cleanup(cpu_env, arg1);
10245          return get_errno(exit_group(arg1));
10246  #endif
10247      case TARGET_NR_setdomainname:
10248          if (!(p = lock_user_string(arg1)))
10249              return -TARGET_EFAULT;
10250          ret = get_errno(setdomainname(p, arg2));
10251          unlock_user(p, arg1, 0);
10252          return ret;
10253      case TARGET_NR_uname:
10254          /* no need to transcode because we use the linux syscall */
10255          {
10256              struct new_utsname * buf;
10257  
10258              if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10259                  return -TARGET_EFAULT;
10260              ret = get_errno(sys_uname(buf));
10261              if (!is_error(ret)) {
10262                  /* Overwrite the native machine name with whatever is being
10263                     emulated. */
10264                  g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10265                            sizeof(buf->machine));
10266                  /* Allow the user to override the reported release.  */
10267                  if (qemu_uname_release && *qemu_uname_release) {
10268                      g_strlcpy(buf->release, qemu_uname_release,
10269                                sizeof(buf->release));
10270                  }
10271              }
10272              unlock_user_struct(buf, arg1, 1);
10273          }
10274          return ret;
10275  #ifdef TARGET_I386
10276      case TARGET_NR_modify_ldt:
10277          return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10278  #if !defined(TARGET_X86_64)
10279      case TARGET_NR_vm86:
10280          return do_vm86(cpu_env, arg1, arg2);
10281  #endif
10282  #endif
10283  #if defined(TARGET_NR_adjtimex)
10284      case TARGET_NR_adjtimex:
10285          {
10286              struct timex host_buf;
10287  
10288              if (target_to_host_timex(&host_buf, arg1) != 0) {
10289                  return -TARGET_EFAULT;
10290              }
10291              ret = get_errno(adjtimex(&host_buf));
10292              if (!is_error(ret)) {
10293                  if (host_to_target_timex(arg1, &host_buf) != 0) {
10294                      return -TARGET_EFAULT;
10295                  }
10296              }
10297          }
10298          return ret;
10299  #endif
10300  #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10301      case TARGET_NR_clock_adjtime:
10302          {
10303              struct timex htx, *phtx = &htx;
10304  
10305              if (target_to_host_timex(phtx, arg2) != 0) {
10306                  return -TARGET_EFAULT;
10307              }
10308              ret = get_errno(clock_adjtime(arg1, phtx));
10309              if (!is_error(ret) && phtx) {
10310                  if (host_to_target_timex(arg2, phtx) != 0) {
10311                      return -TARGET_EFAULT;
10312                  }
10313              }
10314          }
10315          return ret;
10316  #endif
10317  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10318      case TARGET_NR_clock_adjtime64:
10319          {
10320              struct timex htx;
10321  
10322              if (target_to_host_timex64(&htx, arg2) != 0) {
10323                  return -TARGET_EFAULT;
10324              }
10325              ret = get_errno(clock_adjtime(arg1, &htx));
10326              if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
10328              }
10329          }
10330          return ret;
10331  #endif
10332      case TARGET_NR_getpgid:
10333          return get_errno(getpgid(arg1));
10334      case TARGET_NR_fchdir:
10335          return get_errno(fchdir(arg1));
10336      case TARGET_NR_personality:
10337          return get_errno(personality(arg1));
10338  #ifdef TARGET_NR__llseek /* Not on alpha */
10339      case TARGET_NR__llseek:
10340          {
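            /*
             * _llseek argument layout: arg1 = fd, arg2/arg3 = high/low
             * halves of the 64-bit offset, arg4 = pointer to where the
             * resulting offset is written, arg5 = whence.
             */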
10341              int64_t res;
10342  #if !defined(__NR_llseek)
10343              res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10344              if (res == -1) {
10345                  ret = get_errno(res);
10346              } else {
10347                  ret = 0;
10348              }
10349  #else
10350              ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10351  #endif
10352              if ((ret == 0) && put_user_s64(res, arg4)) {
10353                  return -TARGET_EFAULT;
10354              }
10355          }
10356          return ret;
10357  #endif
10358  #ifdef TARGET_NR_getdents
10359      case TARGET_NR_getdents:
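        /*
         * Three build-time strategies: convert host records into the
         * target layout (32-bit target on a 64-bit host), byte-swap the
         * records in place when the layouts already match, or, if the
         * host has no getdents at all, rebuild the records from
         * getdents64 below.
         */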
10360  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10361  #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10362          {
10363              struct target_dirent *target_dirp;
10364              struct linux_dirent *dirp;
10365              abi_long count = arg3;
10366  
10367              dirp = g_try_malloc(count);
10368              if (!dirp) {
10369                  return -TARGET_ENOMEM;
10370              }
10371  
10372              ret = get_errno(sys_getdents(arg1, dirp, count));
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
                if (!target_dirp) {
                    g_free(dirp);
                    return -TARGET_EFAULT;
                }
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
10403              g_free(dirp);
10404          }
10405  #else
10406          {
10407              struct linux_dirent *dirp;
10408              abi_long count = arg3;
10409  
10410              if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10411                  return -TARGET_EFAULT;
10412              ret = get_errno(sys_getdents(arg1, dirp, count));
10413              if (!is_error(ret)) {
10414                  struct linux_dirent *de;
10415                  int len = ret;
10416                  int reclen;
10417                  de = dirp;
10418                  while (len > 0) {
10419                      reclen = de->d_reclen;
10420                      if (reclen > len)
10421                          break;
10422                      de->d_reclen = tswap16(reclen);
10423                      tswapls(&de->d_ino);
10424                      tswapls(&de->d_off);
10425                      de = (struct linux_dirent *)((char *)de + reclen);
10426                      len -= reclen;
10427                  }
10428              }
10429              unlock_user(dirp, arg2, ret);
10430          }
10431  #endif
10432  #else
10433          /* Implement getdents in terms of getdents64 */
10434          {
10435              struct linux_dirent64 *dirp;
10436              abi_long count = arg3;
10437  
10438              dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10439              if (!dirp) {
10440                  return -TARGET_EFAULT;
10441              }
10442              ret = get_errno(sys_getdents64(arg1, dirp, count));
10443              if (!is_error(ret)) {
10444                  /* Convert the dirent64 structs to target dirent.  We do this
10445                   * in-place, since we can guarantee that a target_dirent is no
10446                   * larger than a dirent64; however this means we have to be
10447                   * careful to read everything before writing in the new format.
10448                   */
10449                  struct linux_dirent64 *de;
10450                  struct target_dirent *tde;
10451                  int len = ret;
10452                  int tlen = 0;
10453  
10454                  de = dirp;
10455                  tde = (struct target_dirent *)dirp;
10456                  while (len > 0) {
10457                      int namelen, treclen;
10458                      int reclen = de->d_reclen;
10459                      uint64_t ino = de->d_ino;
10460                      int64_t off = de->d_off;
10461                      uint8_t type = de->d_type;
10462  
10463                      namelen = strlen(de->d_name);
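                    /* +2 covers the trailing NUL and the d_type byte that
                       is stored in the last byte of the record (below). */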
10464                      treclen = offsetof(struct target_dirent, d_name)
10465                          + namelen + 2;
10466                      treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10467  
10468                      memmove(tde->d_name, de->d_name, namelen + 1);
10469                      tde->d_ino = tswapal(ino);
10470                      tde->d_off = tswapal(off);
10471                      tde->d_reclen = tswap16(treclen);
10472                      /* The target_dirent type is in what was formerly a padding
10473                       * byte at the end of the structure:
10474                       */
10475                      *(((char *)tde) + treclen - 1) = type;
10476  
10477                      de = (struct linux_dirent64 *)((char *)de + reclen);
10478                      tde = (struct target_dirent *)((char *)tde + treclen);
10479                      len -= reclen;
10480                      tlen += treclen;
10481                  }
10482                  ret = tlen;
10483              }
10484              unlock_user(dirp, arg2, ret);
10485          }
10486  #endif
10487          return ret;
10488  #endif /* TARGET_NR_getdents */
10489  #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10490      case TARGET_NR_getdents64:
10491          {
10492              struct linux_dirent64 *dirp;
10493              abi_long count = arg3;
10494              if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10495                  return -TARGET_EFAULT;
10496              ret = get_errno(sys_getdents64(arg1, dirp, count));
10497              if (!is_error(ret)) {
10498                  struct linux_dirent64 *de;
10499                  int len = ret;
10500                  int reclen;
10501                  de = dirp;
10502                  while (len > 0) {
10503                      reclen = de->d_reclen;
10504                      if (reclen > len)
10505                          break;
10506                      de->d_reclen = tswap16(reclen);
10507                      tswap64s((uint64_t *)&de->d_ino);
10508                      tswap64s((uint64_t *)&de->d_off);
10509                      de = (struct linux_dirent64 *)((char *)de + reclen);
10510                      len -= reclen;
10511                  }
10512              }
10513              unlock_user(dirp, arg2, ret);
10514          }
10515          return ret;
10516  #endif /* TARGET_NR_getdents64 */
10517  #if defined(TARGET_NR__newselect)
10518      case TARGET_NR__newselect:
10519          return do_select(arg1, arg2, arg3, arg4, arg5);
10520  #endif
10521  #ifdef TARGET_NR_poll
10522      case TARGET_NR_poll:
10523          return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10524  #endif
10525  #ifdef TARGET_NR_ppoll
10526      case TARGET_NR_ppoll:
10527          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10528  #endif
10529  #ifdef TARGET_NR_ppoll_time64
10530      case TARGET_NR_ppoll_time64:
10531          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10532  #endif
10533      case TARGET_NR_flock:
10534          /* NOTE: the flock constant seems to be the same for every
10535             Linux platform */
10536          return get_errno(safe_flock(arg1, arg2));
10537      case TARGET_NR_readv:
10538          {
10539              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10540              if (vec != NULL) {
10541                  ret = get_errno(safe_readv(arg1, vec, arg3));
10542                  unlock_iovec(vec, arg2, arg3, 1);
10543              } else {
10544                  ret = -host_to_target_errno(errno);
10545              }
10546          }
10547          return ret;
10548      case TARGET_NR_writev:
10549          {
10550              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10551              if (vec != NULL) {
10552                  ret = get_errno(safe_writev(arg1, vec, arg3));
10553                  unlock_iovec(vec, arg2, arg3, 0);
10554              } else {
10555                  ret = -host_to_target_errno(errno);
10556              }
10557          }
10558          return ret;
10559  #if defined(TARGET_NR_preadv)
10560      case TARGET_NR_preadv:
10561          {
10562              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10563              if (vec != NULL) {
10564                  unsigned long low, high;
10565  
10566                  target_to_host_low_high(arg4, arg5, &low, &high);
10567                  ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10568                  unlock_iovec(vec, arg2, arg3, 1);
10569              } else {
10570                  ret = -host_to_target_errno(errno);
            }
10572          }
10573          return ret;
10574  #endif
10575  #if defined(TARGET_NR_pwritev)
10576      case TARGET_NR_pwritev:
10577          {
10578              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10579              if (vec != NULL) {
10580                  unsigned long low, high;
10581  
10582                  target_to_host_low_high(arg4, arg5, &low, &high);
10583                  ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10584                  unlock_iovec(vec, arg2, arg3, 0);
10585              } else {
10586                  ret = -host_to_target_errno(errno);
            }
10588          }
10589          return ret;
10590  #endif
10591      case TARGET_NR_getsid:
10592          return get_errno(getsid(arg1));
10593  #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10594      case TARGET_NR_fdatasync:
10595          return get_errno(fdatasync(arg1));
10596  #endif
10597      case TARGET_NR_sched_getaffinity:
10598          {
10599              unsigned int mask_size;
10600              unsigned long *mask;
10601  
10602              /*
10603               * sched_getaffinity needs multiples of ulong, so need to take
10604               * care of mismatches between target ulong and host ulong sizes.
10605               */
10606              if (arg2 & (sizeof(abi_ulong) - 1)) {
10607                  return -TARGET_EINVAL;
10608              }
10609              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
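            /*
             * For example, a 32-bit target passing arg2 == 12 on a 64-bit
             * host rounds mask_size up to 16 so the host syscall always
             * sees whole unsigned longs.
             */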
10610  
10611              mask = alloca(mask_size);
10612              memset(mask, 0, mask_size);
10613              ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10614  
10615              if (!is_error(ret)) {
10616                  if (ret > arg2) {
10617                      /* More data returned than the caller's buffer will fit.
10618                       * This only happens if sizeof(abi_long) < sizeof(long)
10619                       * and the caller passed us a buffer holding an odd number
10620                       * of abi_longs. If the host kernel is actually using the
10621                       * extra 4 bytes then fail EINVAL; otherwise we can just
10622                       * ignore them and only copy the interesting part.
10623                       */
10624                      int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10625                      if (numcpus > arg2 * 8) {
10626                          return -TARGET_EINVAL;
10627                      }
10628                      ret = arg2;
10629                  }
10630  
10631                  if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10632                      return -TARGET_EFAULT;
10633                  }
10634              }
10635          }
10636          return ret;
10637      case TARGET_NR_sched_setaffinity:
10638          {
10639              unsigned int mask_size;
10640              unsigned long *mask;
10641  
10642              /*
10643               * sched_setaffinity needs multiples of ulong, so need to take
10644               * care of mismatches between target ulong and host ulong sizes.
10645               */
10646              if (arg2 & (sizeof(abi_ulong) - 1)) {
10647                  return -TARGET_EINVAL;
10648              }
10649              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10650              mask = alloca(mask_size);
10651  
10652              ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10653              if (ret) {
10654                  return ret;
10655              }
10656  
10657              return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10658          }
10659      case TARGET_NR_getcpu:
10660          {
10661              unsigned cpu, node;
10662              ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10663                                         arg2 ? &node : NULL,
10664                                         NULL));
10665              if (is_error(ret)) {
10666                  return ret;
10667              }
10668              if (arg1 && put_user_u32(cpu, arg1)) {
10669                  return -TARGET_EFAULT;
10670              }
10671              if (arg2 && put_user_u32(node, arg2)) {
10672                  return -TARGET_EFAULT;
10673              }
10674          }
10675          return ret;
10676      case TARGET_NR_sched_setparam:
10677          {
10678              struct sched_param *target_schp;
10679              struct sched_param schp;
10680  
10681              if (arg2 == 0) {
10682                  return -TARGET_EINVAL;
10683              }
10684              if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10685                  return -TARGET_EFAULT;
10686              schp.sched_priority = tswap32(target_schp->sched_priority);
10687              unlock_user_struct(target_schp, arg2, 0);
10688              return get_errno(sched_setparam(arg1, &schp));
10689          }
10690      case TARGET_NR_sched_getparam:
10691          {
10692              struct sched_param *target_schp;
10693              struct sched_param schp;
10694  
10695              if (arg2 == 0) {
10696                  return -TARGET_EINVAL;
10697              }
10698              ret = get_errno(sched_getparam(arg1, &schp));
10699              if (!is_error(ret)) {
10700                  if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10701                      return -TARGET_EFAULT;
10702                  target_schp->sched_priority = tswap32(schp.sched_priority);
10703                  unlock_user_struct(target_schp, arg2, 1);
10704              }
10705          }
10706          return ret;
10707      case TARGET_NR_sched_setscheduler:
10708          {
10709              struct sched_param *target_schp;
10710              struct sched_param schp;
10711              if (arg3 == 0) {
10712                  return -TARGET_EINVAL;
10713              }
10714              if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10715                  return -TARGET_EFAULT;
10716              schp.sched_priority = tswap32(target_schp->sched_priority);
10717              unlock_user_struct(target_schp, arg3, 0);
10718              return get_errno(sched_setscheduler(arg1, arg2, &schp));
10719          }
10720      case TARGET_NR_sched_getscheduler:
10721          return get_errno(sched_getscheduler(arg1));
10722      case TARGET_NR_sched_yield:
10723          return get_errno(sched_yield());
10724      case TARGET_NR_sched_get_priority_max:
10725          return get_errno(sched_get_priority_max(arg1));
10726      case TARGET_NR_sched_get_priority_min:
10727          return get_errno(sched_get_priority_min(arg1));
10728  #ifdef TARGET_NR_sched_rr_get_interval
10729      case TARGET_NR_sched_rr_get_interval:
10730          {
10731              struct timespec ts;
10732              ret = get_errno(sched_rr_get_interval(arg1, &ts));
10733              if (!is_error(ret)) {
10734                  ret = host_to_target_timespec(arg2, &ts);
10735              }
10736          }
10737          return ret;
10738  #endif
10739  #ifdef TARGET_NR_sched_rr_get_interval_time64
10740      case TARGET_NR_sched_rr_get_interval_time64:
10741          {
10742              struct timespec ts;
10743              ret = get_errno(sched_rr_get_interval(arg1, &ts));
10744              if (!is_error(ret)) {
10745                  ret = host_to_target_timespec64(arg2, &ts);
10746              }
10747          }
10748          return ret;
10749  #endif
10750  #if defined(TARGET_NR_nanosleep)
10751      case TARGET_NR_nanosleep:
10752          {
10753              struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
10759          }
10760          return ret;
10761  #endif
10762      case TARGET_NR_prctl:
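        /*
         * Options that take pointer arguments or touch emulated CPU state
         * are handled explicitly; everything else is forwarded to the host
         * prctl in the default case below.
         */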
10763          switch (arg1) {
10764          case PR_GET_PDEATHSIG:
10765          {
10766              int deathsig;
10767              ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10768              if (!is_error(ret) && arg2
10769                  && put_user_s32(deathsig, arg2)) {
10770                  return -TARGET_EFAULT;
10771              }
10772              return ret;
10773          }
10774  #ifdef PR_GET_NAME
10775          case PR_GET_NAME:
10776          {
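            /* The kernel task name is at most 16 bytes (TASK_COMM_LEN),
               including the terminating NUL, hence the fixed size here. */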
10777              void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10778              if (!name) {
10779                  return -TARGET_EFAULT;
10780              }
10781              ret = get_errno(prctl(arg1, (unsigned long)name,
10782                                    arg3, arg4, arg5));
10783              unlock_user(name, arg2, 16);
10784              return ret;
10785          }
10786          case PR_SET_NAME:
10787          {
10788              void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10789              if (!name) {
10790                  return -TARGET_EFAULT;
10791              }
10792              ret = get_errno(prctl(arg1, (unsigned long)name,
10793                                    arg3, arg4, arg5));
10794              unlock_user(name, arg2, 0);
10795              return ret;
10796          }
10797  #endif
10798  #ifdef TARGET_MIPS
10799          case TARGET_PR_GET_FP_MODE:
10800          {
10801              CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10802              ret = 0;
10803              if (env->CP0_Status & (1 << CP0St_FR)) {
10804                  ret |= TARGET_PR_FP_MODE_FR;
10805              }
10806              if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10807                  ret |= TARGET_PR_FP_MODE_FRE;
10808              }
10809              return ret;
10810          }
10811          case TARGET_PR_SET_FP_MODE:
10812          {
10813              CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10814              bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10815              bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10816              bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10817              bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10818  
10819              const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10820                                              TARGET_PR_FP_MODE_FRE;
10821  
10822              /* If nothing to change, return right away, successfully.  */
10823              if (old_fr == new_fr && old_fre == new_fre) {
10824                  return 0;
10825              }
10826              /* Check the value is valid */
10827              if (arg2 & ~known_bits) {
10828                  return -TARGET_EOPNOTSUPP;
10829              }
10830              /* Setting FRE without FR is not supported.  */
10831              if (new_fre && !new_fr) {
10832                  return -TARGET_EOPNOTSUPP;
10833              }
10834              if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10835                  /* FR1 is not supported */
10836                  return -TARGET_EOPNOTSUPP;
10837              }
10838              if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10839                  && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10840                  /* cannot set FR=0 */
10841                  return -TARGET_EOPNOTSUPP;
10842              }
10843              if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10844                  /* Cannot set FRE=1 */
10845                  return -TARGET_EOPNOTSUPP;
10846              }
10847  
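            /*
             * Switching FR repacks the FP register file: with FR=0 a
             * 64-bit value spans an even/odd pair of 32-bit registers,
             * with FR=1 every register is a full 64 bits, so move the
             * odd halves between the two layouts.
             */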
10848              int i;
10849              fpr_t *fpr = env->active_fpu.fpr;
10850              for (i = 0; i < 32 ; i += 2) {
10851                  if (!old_fr && new_fr) {
10852                      fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10853                  } else if (old_fr && !new_fr) {
10854                      fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10855                  }
10856              }
10857  
10858              if (new_fr) {
10859                  env->CP0_Status |= (1 << CP0St_FR);
10860                  env->hflags |= MIPS_HFLAG_F64;
10861              } else {
10862                  env->CP0_Status &= ~(1 << CP0St_FR);
10863                  env->hflags &= ~MIPS_HFLAG_F64;
10864              }
10865              if (new_fre) {
10866                  env->CP0_Config5 |= (1 << CP0C5_FRE);
10867                  if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10868                      env->hflags |= MIPS_HFLAG_FRE;
10869                  }
10870              } else {
10871                  env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10872                  env->hflags &= ~MIPS_HFLAG_FRE;
10873              }
10874  
10875              return 0;
10876          }
10877  #endif /* MIPS */
10878  #ifdef TARGET_AARCH64
10879          case TARGET_PR_SVE_SET_VL:
10880              /*
10881               * We cannot support either PR_SVE_SET_VL_ONEXEC or
10882               * PR_SVE_VL_INHERIT.  Note the kernel definition
10883               * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10884               * even though the current architectural maximum is VQ=16.
10885               */
10886              ret = -TARGET_EINVAL;
10887              if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10888                  && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10889                  CPUARMState *env = cpu_env;
10890                  ARMCPU *cpu = env_archcpu(env);
10891                  uint32_t vq, old_vq;
10892  
10893                  old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
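                /*
                 * arg2 is the requested vector length in bytes and vq
                 * counts 128-bit quadwords, so e.g. arg2 == 64 gives
                 * vq = 4, clamped to the CPU's maximum.
                 */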
10894                  vq = MAX(arg2 / 16, 1);
10895                  vq = MIN(vq, cpu->sve_max_vq);
10896  
10897                  if (vq < old_vq) {
10898                      aarch64_sve_narrow_vq(env, vq);
10899                  }
10900                  env->vfp.zcr_el[1] = vq - 1;
10901                  arm_rebuild_hflags(env);
10902                  ret = vq * 16;
10903              }
10904              return ret;
10905          case TARGET_PR_SVE_GET_VL:
10906              ret = -TARGET_EINVAL;
10907              {
10908                  ARMCPU *cpu = env_archcpu(cpu_env);
10909                  if (cpu_isar_feature(aa64_sve, cpu)) {
10910                      ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10911                  }
10912              }
10913              return ret;
10914          case TARGET_PR_PAC_RESET_KEYS:
10915              {
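                /* arg2 selects which pointer-authentication keys to
                   regenerate; as in the kernel, 0 means all of them. */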
10916                  CPUARMState *env = cpu_env;
10917                  ARMCPU *cpu = env_archcpu(env);
10918  
10919                  if (arg3 || arg4 || arg5) {
10920                      return -TARGET_EINVAL;
10921                  }
10922                  if (cpu_isar_feature(aa64_pauth, cpu)) {
10923                      int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10924                                 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10925                                 TARGET_PR_PAC_APGAKEY);
10926                      int ret = 0;
10927                      Error *err = NULL;
10928  
10929                      if (arg2 == 0) {
10930                          arg2 = all;
10931                      } else if (arg2 & ~all) {
10932                          return -TARGET_EINVAL;
10933                      }
10934                      if (arg2 & TARGET_PR_PAC_APIAKEY) {
10935                          ret |= qemu_guest_getrandom(&env->keys.apia,
10936                                                      sizeof(ARMPACKey), &err);
10937                      }
10938                      if (arg2 & TARGET_PR_PAC_APIBKEY) {
10939                          ret |= qemu_guest_getrandom(&env->keys.apib,
10940                                                      sizeof(ARMPACKey), &err);
10941                      }
10942                      if (arg2 & TARGET_PR_PAC_APDAKEY) {
10943                          ret |= qemu_guest_getrandom(&env->keys.apda,
10944                                                      sizeof(ARMPACKey), &err);
10945                      }
10946                      if (arg2 & TARGET_PR_PAC_APDBKEY) {
10947                          ret |= qemu_guest_getrandom(&env->keys.apdb,
10948                                                      sizeof(ARMPACKey), &err);
10949                      }
10950                      if (arg2 & TARGET_PR_PAC_APGAKEY) {
10951                          ret |= qemu_guest_getrandom(&env->keys.apga,
10952                                                      sizeof(ARMPACKey), &err);
10953                      }
10954                      if (ret != 0) {
10955                          /*
10956                           * Some unknown failure in the crypto.  The best
10957                           * we can do is log it and fail the syscall.
10958                           * The real syscall cannot fail this way.
10959                           */
10960                          qemu_log_mask(LOG_UNIMP,
10961                                        "PR_PAC_RESET_KEYS: Crypto failure: %s",
10962                                        error_get_pretty(err));
10963                          error_free(err);
10964                          return -TARGET_EIO;
10965                      }
10966                      return 0;
10967                  }
10968              }
10969              return -TARGET_EINVAL;
10970          case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10971              {
10972                  abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10973                  CPUARMState *env = cpu_env;
10974                  ARMCPU *cpu = env_archcpu(env);
10975  
10976                  if (cpu_isar_feature(aa64_mte, cpu)) {
10977                      valid_mask |= TARGET_PR_MTE_TCF_MASK;
10978                      valid_mask |= TARGET_PR_MTE_TAG_MASK;
10979                  }
10980  
10981                  if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10982                      return -TARGET_EINVAL;
10983                  }
10984                  env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10985  
10986                  if (cpu_isar_feature(aa64_mte, cpu)) {
10987                      switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10988                      case TARGET_PR_MTE_TCF_NONE:
10989                      case TARGET_PR_MTE_TCF_SYNC:
10990                      case TARGET_PR_MTE_TCF_ASYNC:
10991                          break;
10992                      default:
                        return -TARGET_EINVAL;
10994                      }
10995  
10996                      /*
10997                       * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10998                       * Note that the syscall values are consistent with hw.
10999                       */
11000                      env->cp15.sctlr_el[1] =
11001                          deposit64(env->cp15.sctlr_el[1], 38, 2,
11002                                    arg2 >> TARGET_PR_MTE_TCF_SHIFT);
11003  
11004                      /*
11005                       * Write PR_MTE_TAG to GCR_EL1[Exclude].
11006                       * Note that the syscall uses an include mask,
11007                       * and hardware uses an exclude mask -- invert.
11008                       */
11009                      env->cp15.gcr_el1 =
11010                          deposit64(env->cp15.gcr_el1, 0, 16,
11011                                    ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
11012                      arm_rebuild_hflags(env);
11013                  }
11014                  return 0;
11015              }
11016          case TARGET_PR_GET_TAGGED_ADDR_CTRL:
11017              {
11018                  abi_long ret = 0;
11019                  CPUARMState *env = cpu_env;
11020                  ARMCPU *cpu = env_archcpu(env);
11021  
11022                  if (arg2 || arg3 || arg4 || arg5) {
11023                      return -TARGET_EINVAL;
11024                  }
11025                  if (env->tagged_addr_enable) {
11026                      ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
11027                  }
11028                  if (cpu_isar_feature(aa64_mte, cpu)) {
11029                      /* See above. */
11030                      ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
11031                              << TARGET_PR_MTE_TCF_SHIFT);
11032                      ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
11033                                      ~env->cp15.gcr_el1);
11034                  }
11035                  return ret;
11036              }
11037  #endif /* AARCH64 */
11038          case PR_GET_SECCOMP:
11039          case PR_SET_SECCOMP:
11040              /* Disable seccomp to prevent the target disabling syscalls we
11041               * need. */
11042              return -TARGET_EINVAL;
11043          default:
11044              /* Most prctl options have no pointer arguments */
11045              return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
11046          }
11047          break;
11048  #ifdef TARGET_NR_arch_prctl
11049      case TARGET_NR_arch_prctl:
11050          return do_arch_prctl(cpu_env, arg1, arg2);
11051  #endif
11052  #ifdef TARGET_NR_pread64
11053      case TARGET_NR_pread64:
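        /*
         * The 64-bit file offset is passed in a register pair; ABIs that
         * require such pairs to start on an even-numbered register shift
         * the pair up by one slot, which regpairs_aligned() detects.
         */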
11054          if (regpairs_aligned(cpu_env, num)) {
11055              arg4 = arg5;
11056              arg5 = arg6;
11057          }
11058          if (arg2 == 0 && arg3 == 0) {
11059              /* Special-case NULL buffer and zero length, which should succeed */
11060              p = 0;
11061          } else {
11062              p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11063              if (!p) {
11064                  return -TARGET_EFAULT;
11065              }
11066          }
11067          ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11068          unlock_user(p, arg2, ret);
11069          return ret;
11070      case TARGET_NR_pwrite64:
11071          if (regpairs_aligned(cpu_env, num)) {
11072              arg4 = arg5;
11073              arg5 = arg6;
11074          }
11075          if (arg2 == 0 && arg3 == 0) {
11076              /* Special-case NULL buffer and zero length, which should succeed */
11077              p = 0;
11078          } else {
11079              p = lock_user(VERIFY_READ, arg2, arg3, 1);
11080              if (!p) {
11081                  return -TARGET_EFAULT;
11082              }
11083          }
11084          ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11085          unlock_user(p, arg2, 0);
11086          return ret;
11087  #endif
11088      case TARGET_NR_getcwd:
11089          if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11090              return -TARGET_EFAULT;
11091          ret = get_errno(sys_getcwd1(p, arg2));
11092          unlock_user(p, arg1, ret);
11093          return ret;
11094      case TARGET_NR_capget:
11095      case TARGET_NR_capset:
11096      {
11097          struct target_user_cap_header *target_header;
11098          struct target_user_cap_data *target_data = NULL;
11099          struct __user_cap_header_struct header;
11100          struct __user_cap_data_struct data[2];
11101          struct __user_cap_data_struct *dataptr = NULL;
11102          int i, target_datalen;
11103          int data_items = 1;
11104  
11105          if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11106              return -TARGET_EFAULT;
11107          }
11108          header.version = tswap32(target_header->version);
11109          header.pid = tswap32(target_header->pid);
11110  
11111          if (header.version != _LINUX_CAPABILITY_VERSION) {
11112              /* Version 2 and up takes pointer to two user_data structs */
11113              data_items = 2;
11114          }
11115  
11116          target_datalen = sizeof(*target_data) * data_items;
11117  
11118          if (arg2) {
11119              if (num == TARGET_NR_capget) {
11120                  target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11121              } else {
11122                  target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11123              }
11124              if (!target_data) {
11125                  unlock_user_struct(target_header, arg1, 0);
11126                  return -TARGET_EFAULT;
11127              }
11128  
11129              if (num == TARGET_NR_capset) {
11130                  for (i = 0; i < data_items; i++) {
11131                      data[i].effective = tswap32(target_data[i].effective);
11132                      data[i].permitted = tswap32(target_data[i].permitted);
11133                      data[i].inheritable = tswap32(target_data[i].inheritable);
11134                  }
11135              }
11136  
11137              dataptr = data;
11138          }
11139  
11140          if (num == TARGET_NR_capget) {
11141              ret = get_errno(capget(&header, dataptr));
11142          } else {
11143              ret = get_errno(capset(&header, dataptr));
11144          }
11145  
11146          /* The kernel always updates version for both capget and capset */
11147          target_header->version = tswap32(header.version);
11148          unlock_user_struct(target_header, arg1, 1);
11149  
11150          if (arg2) {
11151              if (num == TARGET_NR_capget) {
11152                  for (i = 0; i < data_items; i++) {
11153                      target_data[i].effective = tswap32(data[i].effective);
11154                      target_data[i].permitted = tswap32(data[i].permitted);
11155                      target_data[i].inheritable = tswap32(data[i].inheritable);
11156                  }
11157                  unlock_user(target_data, arg2, target_datalen);
11158              } else {
11159                  unlock_user(target_data, arg2, 0);
11160              }
11161          }
11162          return ret;
11163      }
11164      case TARGET_NR_sigaltstack:
11165          return do_sigaltstack(arg1, arg2, cpu_env);
11166  
11167  #ifdef CONFIG_SENDFILE
11168  #ifdef TARGET_NR_sendfile
11169      case TARGET_NR_sendfile:
11170      {
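        /* sendfile takes an abi_long offset, while sendfile64 below takes
           a full 64-bit one, hence get_user_sal here vs get_user_s64. */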
11171          off_t *offp = NULL;
11172          off_t off;
11173          if (arg3) {
11174              ret = get_user_sal(off, arg3);
11175              if (is_error(ret)) {
11176                  return ret;
11177              }
11178              offp = &off;
11179          }
11180          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11181          if (!is_error(ret) && arg3) {
11182              abi_long ret2 = put_user_sal(off, arg3);
11183              if (is_error(ret2)) {
11184                  ret = ret2;
11185              }
11186          }
11187          return ret;
11188      }
11189  #endif
11190  #ifdef TARGET_NR_sendfile64
11191      case TARGET_NR_sendfile64:
11192      {
11193          off_t *offp = NULL;
11194          off_t off;
11195          if (arg3) {
11196              ret = get_user_s64(off, arg3);
11197              if (is_error(ret)) {
11198                  return ret;
11199              }
11200              offp = &off;
11201          }
11202          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11203          if (!is_error(ret) && arg3) {
11204              abi_long ret2 = put_user_s64(off, arg3);
11205              if (is_error(ret2)) {
11206                  ret = ret2;
11207              }
11208          }
11209          return ret;
11210      }
11211  #endif
11212  #endif
11213  #ifdef TARGET_NR_vfork
11214      case TARGET_NR_vfork:
11215          return get_errno(do_fork(cpu_env,
11216                           CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11217                           0, 0, 0, 0));
11218  #endif
11219  #ifdef TARGET_NR_ugetrlimit
11220      case TARGET_NR_ugetrlimit:
11221      {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
11233          return ret;
11234      }
11235  #endif
11236  #ifdef TARGET_NR_truncate64
11237      case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11241          unlock_user(p, arg1, 0);
11242          return ret;
11243  #endif
11244  #ifdef TARGET_NR_ftruncate64
11245      case TARGET_NR_ftruncate64:
11246          return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11247  #endif
11248  #ifdef TARGET_NR_stat64
11249      case TARGET_NR_stat64:
11250          if (!(p = lock_user_string(arg1))) {
11251              return -TARGET_EFAULT;
11252          }
11253          ret = get_errno(stat(path(p), &st));
11254          unlock_user(p, arg1, 0);
11255          if (!is_error(ret))
11256              ret = host_to_target_stat64(cpu_env, arg2, &st);
11257          return ret;
11258  #endif
11259  #ifdef TARGET_NR_lstat64
11260      case TARGET_NR_lstat64:
11261          if (!(p = lock_user_string(arg1))) {
11262              return -TARGET_EFAULT;
11263          }
11264          ret = get_errno(lstat(path(p), &st));
11265          unlock_user(p, arg1, 0);
11266          if (!is_error(ret))
11267              ret = host_to_target_stat64(cpu_env, arg2, &st);
11268          return ret;
11269  #endif
11270  #ifdef TARGET_NR_fstat64
11271      case TARGET_NR_fstat64:
11272          ret = get_errno(fstat(arg1, &st));
11273          if (!is_error(ret))
11274              ret = host_to_target_stat64(cpu_env, arg2, &st);
11275          return ret;
11276  #endif
11277  #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11278  #ifdef TARGET_NR_fstatat64
11279      case TARGET_NR_fstatat64:
11280  #endif
11281  #ifdef TARGET_NR_newfstatat
11282      case TARGET_NR_newfstatat:
11283  #endif
11284          if (!(p = lock_user_string(arg2))) {
11285              return -TARGET_EFAULT;
11286          }
11287          ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11288          unlock_user(p, arg2, 0);
11289          if (!is_error(ret))
11290              ret = host_to_target_stat64(cpu_env, arg3, &st);
11291          return ret;
11292  #endif
11293  #if defined(TARGET_NR_statx)
11294      case TARGET_NR_statx:
11295          {
11296              struct target_statx *target_stx;
11297              int dirfd = arg1;
11298              int flags = arg3;
11299  
11300              p = lock_user_string(arg2);
11301              if (p == NULL) {
11302                  return -TARGET_EFAULT;
11303              }
11304  #if defined(__NR_statx)
11305              {
11306                  /*
11307                   * It is assumed that struct statx is architecture independent.
11308                   */
11309                  struct target_statx host_stx;
11310                  int mask = arg4;
11311  
11312                  ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11313                  if (!is_error(ret)) {
11314                      if (host_to_target_statx(&host_stx, arg5) != 0) {
11315                          unlock_user(p, arg2, 0);
11316                          return -TARGET_EFAULT;
11317                      }
11318                  }
11319  
11320                  if (ret != -TARGET_ENOSYS) {
11321                      unlock_user(p, arg2, 0);
11322                      return ret;
11323                  }
11324              }
11325  #endif
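            /*
             * No usable host statx (either not compiled in or the kernel
             * returned ENOSYS): fall back to fstatat() and fill in only
             * the statx fields that a plain struct stat can provide.
             */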
11326              ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11327              unlock_user(p, arg2, 0);
11328  
11329              if (!is_error(ret)) {
11330                  if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11331                      return -TARGET_EFAULT;
11332                  }
11333                  memset(target_stx, 0, sizeof(*target_stx));
11334                  __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11335                  __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11336                  __put_user(st.st_ino, &target_stx->stx_ino);
11337                  __put_user(st.st_mode, &target_stx->stx_mode);
11338                  __put_user(st.st_uid, &target_stx->stx_uid);
11339                  __put_user(st.st_gid, &target_stx->stx_gid);
11340                  __put_user(st.st_nlink, &target_stx->stx_nlink);
11341                  __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11342                  __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11343                  __put_user(st.st_size, &target_stx->stx_size);
11344                  __put_user(st.st_blksize, &target_stx->stx_blksize);
11345                  __put_user(st.st_blocks, &target_stx->stx_blocks);
11346                  __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11347                  __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11348                  __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11349                  unlock_user_struct(target_stx, arg5, 1);
11350              }
11351          }
11352          return ret;
11353  #endif
11354  #ifdef TARGET_NR_lchown
11355      case TARGET_NR_lchown:
11356          if (!(p = lock_user_string(arg1)))
11357              return -TARGET_EFAULT;
11358          ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11359          unlock_user(p, arg1, 0);
11360          return ret;
11361  #endif
11362  #ifdef TARGET_NR_getuid
11363      case TARGET_NR_getuid:
11364          return get_errno(high2lowuid(getuid()));
11365  #endif
11366  #ifdef TARGET_NR_getgid
11367      case TARGET_NR_getgid:
11368          return get_errno(high2lowgid(getgid()));
11369  #endif
11370  #ifdef TARGET_NR_geteuid
11371      case TARGET_NR_geteuid:
11372          return get_errno(high2lowuid(geteuid()));
11373  #endif
11374  #ifdef TARGET_NR_getegid
11375      case TARGET_NR_getegid:
11376          return get_errno(high2lowgid(getegid()));
11377  #endif
11378      case TARGET_NR_setreuid:
11379          return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11380      case TARGET_NR_setregid:
11381          return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11382      case TARGET_NR_getgroups:
11383          {
11384              int gidsetsize = arg1;
11385              target_id *target_grouplist;
11386              gid_t *grouplist;
11387              int i;
11388  
11389              grouplist = alloca(gidsetsize * sizeof(gid_t));
11390              ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
11401          }
11402          return ret;
11403      case TARGET_NR_setgroups:
11404          {
11405              int gidsetsize = arg1;
11406              target_id *target_grouplist;
11407              gid_t *grouplist = NULL;
11408              int i;
11409              if (gidsetsize) {
11410                  grouplist = alloca(gidsetsize * sizeof(gid_t));
11411                  target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11412                  if (!target_grouplist) {
11413                      return -TARGET_EFAULT;
11414                  }
11415                  for (i = 0; i < gidsetsize; i++) {
11416                      grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11417                  }
11418                  unlock_user(target_grouplist, arg2, 0);
11419              }
11420              return get_errno(setgroups(gidsetsize, grouplist));
11421          }
11422      case TARGET_NR_fchown:
11423          return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11424  #if defined(TARGET_NR_fchownat)
11425      case TARGET_NR_fchownat:
11426          if (!(p = lock_user_string(arg2)))
11427              return -TARGET_EFAULT;
11428          ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11429                                   low2highgid(arg4), arg5));
11430          unlock_user(p, arg2, 0);
11431          return ret;
11432  #endif
11433  #ifdef TARGET_NR_setresuid
11434      case TARGET_NR_setresuid:
11435          return get_errno(sys_setresuid(low2highuid(arg1),
11436                                         low2highuid(arg2),
11437                                         low2highuid(arg3)));
11438  #endif
11439  #ifdef TARGET_NR_getresuid
11440      case TARGET_NR_getresuid:
11441          {
11442              uid_t ruid, euid, suid;
11443              ret = get_errno(getresuid(&ruid, &euid, &suid));
11444              if (!is_error(ret)) {
11445                  if (put_user_id(high2lowuid(ruid), arg1)
11446                      || put_user_id(high2lowuid(euid), arg2)
11447                      || put_user_id(high2lowuid(suid), arg3))
11448                      return -TARGET_EFAULT;
11449              }
11450          }
11451          return ret;
11452  #endif
11453  #ifdef TARGET_NR_getresgid
11454      case TARGET_NR_setresgid:
11455          return get_errno(sys_setresgid(low2highgid(arg1),
11456                                         low2highgid(arg2),
11457                                         low2highgid(arg3)));
11458  #endif
11459  #ifdef TARGET_NR_getresgid
11460      case TARGET_NR_getresgid:
11461          {
11462              gid_t rgid, egid, sgid;
11463              ret = get_errno(getresgid(&rgid, &egid, &sgid));
11464              if (!is_error(ret)) {
11465                  if (put_user_id(high2lowgid(rgid), arg1)
11466                      || put_user_id(high2lowgid(egid), arg2)
11467                      || put_user_id(high2lowgid(sgid), arg3))
11468                      return -TARGET_EFAULT;
11469              }
11470          }
11471          return ret;
11472  #endif
11473  #ifdef TARGET_NR_chown
11474      case TARGET_NR_chown:
11475          if (!(p = lock_user_string(arg1)))
11476              return -TARGET_EFAULT;
11477          ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11478          unlock_user(p, arg1, 0);
11479          return ret;
11480  #endif
11481      case TARGET_NR_setuid:
11482          return get_errno(sys_setuid(low2highuid(arg1)));
11483      case TARGET_NR_setgid:
11484          return get_errno(sys_setgid(low2highgid(arg1)));
11485      case TARGET_NR_setfsuid:
11486          return get_errno(setfsuid(arg1));
11487      case TARGET_NR_setfsgid:
11488          return get_errno(setfsgid(arg1));
11489  
11490  #ifdef TARGET_NR_lchown32
11491      case TARGET_NR_lchown32:
11492          if (!(p = lock_user_string(arg1)))
11493              return -TARGET_EFAULT;
11494          ret = get_errno(lchown(p, arg2, arg3));
11495          unlock_user(p, arg1, 0);
11496          return ret;
11497  #endif
11498  #ifdef TARGET_NR_getuid32
11499      case TARGET_NR_getuid32:
11500          return get_errno(getuid());
11501  #endif
11502  
11503  #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11504     /* Alpha specific */
11505      case TARGET_NR_getxuid:
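        /* getxuid returns the real uid as the syscall result and the
           effective uid in a4 (likewise getxgid below for the gids). */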
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
11511          return get_errno(getuid());
11512  #endif
11513  #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11514     /* Alpha specific */
11515      case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
11521          return get_errno(getgid());
11522  #endif
11523  #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11524      /* Alpha specific */
11525      case TARGET_NR_osf_getsysinfo:
11526          ret = -TARGET_EOPNOTSUPP;
11527          switch (arg1) {
11528            case TARGET_GSI_IEEE_FP_CONTROL:
11529              {
11530                  uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11531                  uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11532  
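                /* The exception status bits are kept in the hardware FPCR
                   (see osf_setsysinfo below); fold them into the reported
                   software completion word. */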
11533                  swcr &= ~SWCR_STATUS_MASK;
11534                  swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11535  
                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
11538                  ret = 0;
11539              }
11540              break;
11541  
11542            /* case GSI_IEEE_STATE_AT_SIGNAL:
11543               -- Not implemented in linux kernel.
11544               case GSI_UACPROC:
11545               -- Retrieves current unaligned access state; not much used.
11546               case GSI_PROC_TYPE:
11547               -- Retrieves implver information; surely not used.
11548               case GSI_GET_HWRPB:
11549               -- Grabs a copy of the HWRPB; surely not used.
11550            */
11551          }
11552          return ret;
11553  #endif
11554  #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11555      /* Alpha specific */
11556      case TARGET_NR_osf_setsysinfo:
11557          ret = -TARGET_EOPNOTSUPP;
11558          switch (arg1) {
11559            case TARGET_SSI_IEEE_FP_CONTROL:
11560              {
11561                  uint64_t swcr, fpcr;
11562  
                if (get_user_u64(swcr, arg2)) {
11564                      return -TARGET_EFAULT;
11565                  }
11566  
11567                  /*
11568                   * The kernel calls swcr_update_status to update the
11569                   * status bits from the fpcr at every point that it
11570                   * could be queried.  Therefore, we store the status
11571                   * bits only in FPCR.
11572                   */
11573                  ((CPUAlphaState *)cpu_env)->swcr
11574                      = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11575  
11576                  fpcr = cpu_alpha_load_fpcr(cpu_env);
11577                  fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11578                  fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11579                  cpu_alpha_store_fpcr(cpu_env, fpcr);
11580                  ret = 0;
11581              }
11582              break;
11583  
11584            case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11585              {
11586                  uint64_t exc, fpcr, fex;
11587  
11588                  if (get_user_u64(exc, arg2)) {
11589                      return -TARGET_EFAULT;
11590                  }
11591                  exc &= SWCR_STATUS_MASK;
11592                  fpcr = cpu_alpha_load_fpcr(cpu_env);
11593  
11594                  /* Old exceptions are not signaled.  */
11595                  fex = alpha_ieee_fpcr_to_swcr(fpcr);
11596                  fex = exc & ~fex;
11597                  fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11598                  fex &= ((CPUArchState *)cpu_env)->swcr;
11599  
11600                  /* Update the hardware fpcr.  */
11601                  fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11602                  cpu_alpha_store_fpcr(cpu_env, fpcr);
11603  
11604                  if (fex) {
11605                      int si_code = TARGET_FPE_FLTUNK;
11606                      target_siginfo_t info;
11607  
11608                      if (fex & SWCR_TRAP_ENABLE_DNO) {
11609                          si_code = TARGET_FPE_FLTUND;
11610                      }
11611                      if (fex & SWCR_TRAP_ENABLE_INE) {
11612                          si_code = TARGET_FPE_FLTRES;
11613                      }
11614                      if (fex & SWCR_TRAP_ENABLE_UNF) {
11615                          si_code = TARGET_FPE_FLTUND;
11616                      }
11617                      if (fex & SWCR_TRAP_ENABLE_OVF) {
11618                          si_code = TARGET_FPE_FLTOVF;
11619                      }
11620                      if (fex & SWCR_TRAP_ENABLE_DZE) {
11621                          si_code = TARGET_FPE_FLTDIV;
11622                      }
11623                      if (fex & SWCR_TRAP_ENABLE_INV) {
11624                          si_code = TARGET_FPE_FLTINV;
11625                      }
11626  
11627                      info.si_signo = SIGFPE;
11628                      info.si_errno = 0;
11629                      info.si_code = si_code;
11630                      info._sifields._sigfault._addr
11631                          = ((CPUArchState *)cpu_env)->pc;
11632                      queue_signal((CPUArchState *)cpu_env, info.si_signo,
11633                                   QEMU_SI_FAULT, &info);
11634                  }
11635                  ret = 0;
11636              }
11637              break;
11638  
11639            /* case SSI_NVPAIRS:
11640               -- Used with SSIN_UACPROC to enable unaligned accesses.
11641               case SSI_IEEE_STATE_AT_SIGNAL:
11642               case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11643               -- Not implemented in linux kernel
11644            */
11645          }
11646          return ret;
11647  #endif
11648  #ifdef TARGET_NR_osf_sigprocmask
11649      /* Alpha specific.  */
11650      case TARGET_NR_osf_sigprocmask:
11651          {
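            /* Unlike sigprocmask(2), the OSF flavour returns the old
               mask as the syscall result instead of through a pointer. */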
11652              abi_ulong mask;
11653              int how;
11654              sigset_t set, oldset;
11655  
            switch (arg1) {
11657              case TARGET_SIG_BLOCK:
11658                  how = SIG_BLOCK;
11659                  break;
11660              case TARGET_SIG_UNBLOCK:
11661                  how = SIG_UNBLOCK;
11662                  break;
11663              case TARGET_SIG_SETMASK:
11664                  how = SIG_SETMASK;
11665                  break;
11666              default:
11667                  return -TARGET_EINVAL;
11668              }
11669              mask = arg2;
11670              target_to_host_old_sigset(&set, &mask);
11671              ret = do_sigprocmask(how, &set, &oldset);
11672              if (!ret) {
11673                  host_to_target_old_sigset(&mask, &oldset);
11674                  ret = mask;
11675              }
11676          }
11677          return ret;
11678  #endif
11679  
11680  #ifdef TARGET_NR_getgid32
11681      case TARGET_NR_getgid32:
11682          return get_errno(getgid());
11683  #endif
11684  #ifdef TARGET_NR_geteuid32
11685      case TARGET_NR_geteuid32:
11686          return get_errno(geteuid());
11687  #endif
11688  #ifdef TARGET_NR_getegid32
11689      case TARGET_NR_getegid32:
11690          return get_errno(getegid());
11691  #endif
11692  #ifdef TARGET_NR_setreuid32
11693      case TARGET_NR_setreuid32:
11694          return get_errno(setreuid(arg1, arg2));
11695  #endif
11696  #ifdef TARGET_NR_setregid32
11697      case TARGET_NR_setregid32:
11698          return get_errno(setregid(arg1, arg2));
11699  #endif
11700  #ifdef TARGET_NR_getgroups32
11701      case TARGET_NR_getgroups32:
11702          {
11703              int gidsetsize = arg1;
11704              uint32_t *target_grouplist;
11705              gid_t *grouplist;
11706              int i;
11707  
11708              grouplist = alloca(gidsetsize * sizeof(gid_t));
11709              ret = get_errno(getgroups(gidsetsize, grouplist));
11710              if (gidsetsize == 0)
11711                  return ret;
11712              if (!is_error(ret)) {
11713                  target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11714                  if (!target_grouplist) {
11715                      return -TARGET_EFAULT;
11716                  }
11717                  for (i = 0; i < ret; i++)
11718                      target_grouplist[i] = tswap32(grouplist[i]);
11719                  unlock_user(target_grouplist, arg2, gidsetsize * 4);
11720              }
11721          }
11722          return ret;
11723  #endif
11724  #ifdef TARGET_NR_setgroups32
11725      case TARGET_NR_setgroups32:
11726          {
11727              int gidsetsize = arg1;
11728              uint32_t *target_grouplist;
11729              gid_t *grouplist;
11730              int i;
11731  
11732              grouplist = alloca(gidsetsize * sizeof(gid_t));
11733              target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11734              if (!target_grouplist) {
11735                  return -TARGET_EFAULT;
11736              }
11737              for (i = 0; i < gidsetsize; i++)
11738                  grouplist[i] = tswap32(target_grouplist[i]);
11739              unlock_user(target_grouplist, arg2, 0);
11740              return get_errno(setgroups(gidsetsize, grouplist));
11741          }
11742  #endif
11743  #ifdef TARGET_NR_fchown32
11744      case TARGET_NR_fchown32:
11745          return get_errno(fchown(arg1, arg2, arg3));
11746  #endif
11747  #ifdef TARGET_NR_setresuid32
11748      case TARGET_NR_setresuid32:
11749          return get_errno(sys_setresuid(arg1, arg2, arg3));
11750  #endif
11751  #ifdef TARGET_NR_getresuid32
11752      case TARGET_NR_getresuid32:
11753          {
11754              uid_t ruid, euid, suid;
11755              ret = get_errno(getresuid(&ruid, &euid, &suid));
11756              if (!is_error(ret)) {
11757                  if (put_user_u32(ruid, arg1)
11758                      || put_user_u32(euid, arg2)
11759                      || put_user_u32(suid, arg3))
11760                      return -TARGET_EFAULT;
11761              }
11762          }
11763          return ret;
11764  #endif
11765  #ifdef TARGET_NR_setresgid32
11766      case TARGET_NR_setresgid32:
11767          return get_errno(sys_setresgid(arg1, arg2, arg3));
11768  #endif
11769  #ifdef TARGET_NR_getresgid32
11770      case TARGET_NR_getresgid32:
11771          {
11772              gid_t rgid, egid, sgid;
11773              ret = get_errno(getresgid(&rgid, &egid, &sgid));
11774              if (!is_error(ret)) {
11775                  if (put_user_u32(rgid, arg1)
11776                      || put_user_u32(egid, arg2)
11777                      || put_user_u32(sgid, arg3))
11778                      return -TARGET_EFAULT;
11779              }
11780          }
11781          return ret;
11782  #endif
11783  #ifdef TARGET_NR_chown32
11784      case TARGET_NR_chown32:
11785          if (!(p = lock_user_string(arg1)))
11786              return -TARGET_EFAULT;
11787          ret = get_errno(chown(p, arg2, arg3));
11788          unlock_user(p, arg1, 0);
11789          return ret;
11790  #endif
11791  #ifdef TARGET_NR_setuid32
11792      case TARGET_NR_setuid32:
11793          return get_errno(sys_setuid(arg1));
11794  #endif
11795  #ifdef TARGET_NR_setgid32
11796      case TARGET_NR_setgid32:
11797          return get_errno(sys_setgid(arg1));
11798  #endif
11799  #ifdef TARGET_NR_setfsuid32
11800      case TARGET_NR_setfsuid32:
11801          return get_errno(setfsuid(arg1));
11802  #endif
11803  #ifdef TARGET_NR_setfsgid32
11804      case TARGET_NR_setfsgid32:
11805          return get_errno(setfsgid(arg1));
11806  #endif
11807  #ifdef TARGET_NR_mincore
11808      case TARGET_NR_mincore:
11809          {
11810              void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11811              if (!a) {
11812                  return -TARGET_ENOMEM;
11813              }
11814              p = lock_user_string(arg3);
11815              if (!p) {
11816                  ret = -TARGET_EFAULT;
11817              } else {
11818                  ret = get_errno(mincore(a, arg2, p));
11819                  unlock_user(p, arg3, ret);
11820              }
11821              unlock_user(a, arg1, 0);
11822          }
11823          return ret;
11824  #endif
11825  #ifdef TARGET_NR_arm_fadvise64_64
11826      case TARGET_NR_arm_fadvise64_64:
11827          /* arm_fadvise64_64 looks like fadvise64_64 but
11828           * with different argument order: fd, advice, offset, len
11829           * rather than the usual fd, offset, len, advice.
11830           * Note that offset and len are both 64-bit so appear as
11831           * pairs of 32-bit registers.
11832           */
11833          ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11834                              target_offset64(arg5, arg6), arg2);
11835          return -host_to_target_errno(ret);
11836  #endif
11837  
11838  #if TARGET_ABI_BITS == 32
11839  
11840  #ifdef TARGET_NR_fadvise64_64
11841      case TARGET_NR_fadvise64_64:
11842  #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11843          /* 6 args: fd, advice, offset (high, low), len (high, low) */
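               /* Rotate the advice argument from arg2 to arg6 so the common
                * posix_fadvise() call below can use the usual order.  */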
11844          ret = arg2;
11845          arg2 = arg3;
11846          arg3 = arg4;
11847          arg4 = arg5;
11848          arg5 = arg6;
11849          arg6 = ret;
11850  #else
11851          /* 6 args: fd, offset (high, low), len (high, low), advice */
11852          if (regpairs_aligned(cpu_env, num)) {
11853              /* offset is in (3,4), len in (5,6) and advice in 7 */
11854              arg2 = arg3;
11855              arg3 = arg4;
11856              arg4 = arg5;
11857              arg5 = arg6;
11858              arg6 = arg7;
11859          }
11860  #endif
11861          ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11862                              target_offset64(arg4, arg5), arg6);
11863          return -host_to_target_errno(ret);
11864  #endif
11865  
11866  #ifdef TARGET_NR_fadvise64
11867      case TARGET_NR_fadvise64:
11868          /* 5 args: fd, offset (high, low), len, advice */
11869          if (regpairs_aligned(cpu_env, num)) {
11870              /* offset is in (3,4), len in 5 and advice in 6 */
11871              arg2 = arg3;
11872              arg3 = arg4;
11873              arg4 = arg5;
11874              arg5 = arg6;
11875          }
11876          ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11877          return -host_to_target_errno(ret);
11878  #endif
11879  
11880  #else /* not a 32-bit ABI */
11881  #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11882  #ifdef TARGET_NR_fadvise64_64
11883      case TARGET_NR_fadvise64_64:
11884  #endif
11885  #ifdef TARGET_NR_fadvise64
11886      case TARGET_NR_fadvise64:
11887  #endif
11888  #ifdef TARGET_S390X
11889          switch (arg4) {
11890          case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11891          case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11892          case 6: arg4 = POSIX_FADV_DONTNEED; break;
11893          case 7: arg4 = POSIX_FADV_NOREUSE; break;
11894          default: break;
11895          }
11896  #endif
11897          return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11898  #endif
11899  #endif /* end of 64-bit ABI fadvise handling */
11900  
11901  #ifdef TARGET_NR_madvise
11902      case TARGET_NR_madvise:
11903          /* A straight passthrough may not be safe because qemu sometimes
11904             turns private file-backed mappings into anonymous mappings.
11905             This will break MADV_DONTNEED.
11906             This is a hint, so ignoring and returning success is ok.  */
11907          return 0;
11908  #endif
11909  #ifdef TARGET_NR_fcntl64
11910      case TARGET_NR_fcntl64:
11911      {
11912          int cmd;
11913          struct flock64 fl;
11914          from_flock64_fn *copyfrom = copy_from_user_flock64;
11915          to_flock64_fn *copyto = copy_to_user_flock64;
11916  
11917  #ifdef TARGET_ARM
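               /* The ARM OABI lays out struct flock64 differently from EABI,
                * so use the OABI copy helpers for old-ABI processes.  */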
11918          if (!((CPUARMState *)cpu_env)->eabi) {
11919              copyfrom = copy_from_user_oabi_flock64;
11920              copyto = copy_to_user_oabi_flock64;
11921          }
11922  #endif
11923  
11924          cmd = target_to_host_fcntl_cmd(arg2);
11925          if (cmd == -TARGET_EINVAL) {
11926              return cmd;
11927          }
11928  
11929          switch (arg2) {
11930          case TARGET_F_GETLK64:
11931              ret = copyfrom(&fl, arg3);
11932              if (ret) {
11933                  break;
11934              }
11935              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11936              if (ret == 0) {
11937                  ret = copyto(arg3, &fl);
11938              }
11939              break;
11940  
11941          case TARGET_F_SETLK64:
11942          case TARGET_F_SETLKW64:
11943              ret = copyfrom(&fl, arg3);
11944              if (ret) {
11945                  break;
11946              }
11947              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11948              break;
11949          default:
11950              ret = do_fcntl(arg1, arg2, arg3);
11951              break;
11952          }
11953          return ret;
11954      }
11955  #endif
11956  #ifdef TARGET_NR_cacheflush
11957      case TARGET_NR_cacheflush:
11958          /* self-modifying code is handled automatically, so nothing needed */
11959          return 0;
11960  #endif
11961  #ifdef TARGET_NR_getpagesize
11962      case TARGET_NR_getpagesize:
11963          return TARGET_PAGE_SIZE;
11964  #endif
11965      case TARGET_NR_gettid:
11966          return get_errno(sys_gettid());
11967  #ifdef TARGET_NR_readahead
11968      case TARGET_NR_readahead:
11969  #if TARGET_ABI_BITS == 32
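               /* The 64-bit offset arrives as a register pair; on ABIs that
                * require such pairs to be aligned, the arguments shift up.  */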
11970          if (regpairs_aligned(cpu_env, num)) {
11971              arg2 = arg3;
11972              arg3 = arg4;
11973              arg4 = arg5;
11974          }
11975          ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11976  #else
11977          ret = get_errno(readahead(arg1, arg2, arg3));
11978  #endif
11979          return ret;
11980  #endif
11981  #ifdef CONFIG_ATTR
11982  #ifdef TARGET_NR_setxattr
11983      case TARGET_NR_listxattr:
11984      case TARGET_NR_llistxattr:
11985      {
11986          void *p, *b = 0;
11987          if (arg2) {
11988              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11989              if (!b) {
11990                  return -TARGET_EFAULT;
11991              }
11992          }
11993          p = lock_user_string(arg1);
11994          if (p) {
11995              if (num == TARGET_NR_listxattr) {
11996                  ret = get_errno(listxattr(p, b, arg3));
11997              } else {
11998                  ret = get_errno(llistxattr(p, b, arg3));
11999              }
12000          } else {
12001              ret = -TARGET_EFAULT;
12002          }
12003          unlock_user(p, arg1, 0);
12004          unlock_user(b, arg2, arg3);
12005          return ret;
12006      }
12007      case TARGET_NR_flistxattr:
12008      {
12009          void *b = 0;
12010          if (arg2) {
12011              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12012              if (!b) {
12013                  return -TARGET_EFAULT;
12014              }
12015          }
12016          ret = get_errno(flistxattr(arg1, b, arg3));
12017          unlock_user(b, arg2, arg3);
12018          return ret;
12019      }
12020      case TARGET_NR_setxattr:
12021      case TARGET_NR_lsetxattr:
12022          {
12023              void *p, *n, *v = 0;
12024              if (arg3) {
12025                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12026                  if (!v) {
12027                      return -TARGET_EFAULT;
12028                  }
12029              }
12030              p = lock_user_string(arg1);
12031              n = lock_user_string(arg2);
12032              if (p && n) {
12033                  if (num == TARGET_NR_setxattr) {
12034                      ret = get_errno(setxattr(p, n, v, arg4, arg5));
12035                  } else {
12036                      ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12037                  }
12038              } else {
12039                  ret = -TARGET_EFAULT;
12040              }
12041              unlock_user(p, arg1, 0);
12042              unlock_user(n, arg2, 0);
12043              unlock_user(v, arg3, 0);
12044          }
12045          return ret;
12046      case TARGET_NR_fsetxattr:
12047          {
12048              void *n, *v = 0;
12049              if (arg3) {
12050                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12051                  if (!v) {
12052                      return -TARGET_EFAULT;
12053                  }
12054              }
12055              n = lock_user_string(arg2);
12056              if (n) {
12057                  ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12058              } else {
12059                  ret = -TARGET_EFAULT;
12060              }
12061              unlock_user(n, arg2, 0);
12062              unlock_user(v, arg3, 0);
12063          }
12064          return ret;
12065      case TARGET_NR_getxattr:
12066      case TARGET_NR_lgetxattr:
12067          {
12068              void *p, *n, *v = 0;
12069              if (arg3) {
12070                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12071                  if (!v) {
12072                      return -TARGET_EFAULT;
12073                  }
12074              }
12075              p = lock_user_string(arg1);
12076              n = lock_user_string(arg2);
12077              if (p && n) {
12078                  if (num == TARGET_NR_getxattr) {
12079                      ret = get_errno(getxattr(p, n, v, arg4));
12080                  } else {
12081                      ret = get_errno(lgetxattr(p, n, v, arg4));
12082                  }
12083              } else {
12084                  ret = -TARGET_EFAULT;
12085              }
12086              unlock_user(p, arg1, 0);
12087              unlock_user(n, arg2, 0);
12088              unlock_user(v, arg3, arg4);
12089          }
12090          return ret;
12091      case TARGET_NR_fgetxattr:
12092          {
12093              void *n, *v = 0;
12094              if (arg3) {
12095                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12096                  if (!v) {
12097                      return -TARGET_EFAULT;
12098                  }
12099              }
12100              n = lock_user_string(arg2);
12101              if (n) {
12102                  ret = get_errno(fgetxattr(arg1, n, v, arg4));
12103              } else {
12104                  ret = -TARGET_EFAULT;
12105              }
12106              unlock_user(n, arg2, 0);
12107              unlock_user(v, arg3, arg4);
12108          }
12109          return ret;
12110      case TARGET_NR_removexattr:
12111      case TARGET_NR_lremovexattr:
12112          {
12113              void *p, *n;
12114              p = lock_user_string(arg1);
12115              n = lock_user_string(arg2);
12116              if (p && n) {
12117                  if (num == TARGET_NR_removexattr) {
12118                      ret = get_errno(removexattr(p, n));
12119                  } else {
12120                      ret = get_errno(lremovexattr(p, n));
12121                  }
12122              } else {
12123                  ret = -TARGET_EFAULT;
12124              }
12125              unlock_user(p, arg1, 0);
12126              unlock_user(n, arg2, 0);
12127          }
12128          return ret;
12129      case TARGET_NR_fremovexattr:
12130          {
12131              void *n;
12132              n = lock_user_string(arg2);
12133              if (n) {
12134                  ret = get_errno(fremovexattr(arg1, n));
12135              } else {
12136                  ret = -TARGET_EFAULT;
12137              }
12138              unlock_user(n, arg2, 0);
12139          }
12140          return ret;
12141  #endif
12142  #endif /* CONFIG_ATTR */
12143  #ifdef TARGET_NR_set_thread_area
12144      case TARGET_NR_set_thread_area:
12145  #if defined(TARGET_MIPS)
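             /* MIPS keeps the TLS pointer in the CP0 UserLocal register,
              * which guest code reads back with the RDHWR instruction.  */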
12146        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12147        return 0;
12148  #elif defined(TARGET_CRIS)
12149        if (arg1 & 0xff) {
12150            ret = -TARGET_EINVAL;
12151        } else {
12152            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12153            ret = 0;
12154        }
12155        return ret;
12156  #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12157        return do_set_thread_area(cpu_env, arg1);
12158  #elif defined(TARGET_M68K)
12159        {
12160            TaskState *ts = cpu->opaque;
12161            ts->tp_value = arg1;
12162            return 0;
12163        }
12164  #else
12165        return -TARGET_ENOSYS;
12166  #endif
12167  #endif
12168  #ifdef TARGET_NR_get_thread_area
12169      case TARGET_NR_get_thread_area:
12170  #if defined(TARGET_I386) && defined(TARGET_ABI32)
12171          return do_get_thread_area(cpu_env, arg1);
12172  #elif defined(TARGET_M68K)
12173          {
12174              TaskState *ts = cpu->opaque;
12175              return ts->tp_value;
12176          }
12177  #else
12178          return -TARGET_ENOSYS;
12179  #endif
12180  #endif
12181  #ifdef TARGET_NR_getdomainname
12182      case TARGET_NR_getdomainname:
12183          return -TARGET_ENOSYS;
12184  #endif
12185  
12186  #ifdef TARGET_NR_clock_settime
12187      case TARGET_NR_clock_settime:
12188      {
12189          struct timespec ts;
12190  
12191          ret = target_to_host_timespec(&ts, arg2);
12192          if (!is_error(ret)) {
12193              ret = get_errno(clock_settime(arg1, &ts));
12194          }
12195          return ret;
12196      }
12197  #endif
12198  #ifdef TARGET_NR_clock_settime64
12199      case TARGET_NR_clock_settime64:
12200      {
12201          struct timespec ts;
12202  
12203          ret = target_to_host_timespec64(&ts, arg2);
12204          if (!is_error(ret)) {
12205              ret = get_errno(clock_settime(arg1, &ts));
12206          }
12207          return ret;
12208      }
12209  #endif
12210  #ifdef TARGET_NR_clock_gettime
12211      case TARGET_NR_clock_gettime:
12212      {
12213          struct timespec ts;
12214          ret = get_errno(clock_gettime(arg1, &ts));
12215          if (!is_error(ret)) {
12216              ret = host_to_target_timespec(arg2, &ts);
12217          }
12218          return ret;
12219      }
12220  #endif
12221  #ifdef TARGET_NR_clock_gettime64
12222      case TARGET_NR_clock_gettime64:
12223      {
12224          struct timespec ts;
12225          ret = get_errno(clock_gettime(arg1, &ts));
12226          if (!is_error(ret)) {
12227              ret = host_to_target_timespec64(arg2, &ts);
12228          }
12229          return ret;
12230      }
12231  #endif
12232  #ifdef TARGET_NR_clock_getres
12233      case TARGET_NR_clock_getres:
12234      {
12235          struct timespec ts;
12236          ret = get_errno(clock_getres(arg1, &ts));
12237          if (!is_error(ret) && host_to_target_timespec(arg2, &ts)) {
12238              return -TARGET_EFAULT;
12239          }
12240          return ret;
12241      }
12242  #endif
12243  #ifdef TARGET_NR_clock_getres_time64
12244      case TARGET_NR_clock_getres_time64:
12245      {
12246          struct timespec ts;
12247          ret = get_errno(clock_getres(arg1, &ts));
12248          if (!is_error(ret) && host_to_target_timespec64(arg2, &ts)) {
12249              return -TARGET_EFAULT;
12250          }
12251          return ret;
12252      }
12253  #endif
12254  #ifdef TARGET_NR_clock_nanosleep
12255      case TARGET_NR_clock_nanosleep:
12256      {
12257          struct timespec ts;
12258          if (target_to_host_timespec(&ts, arg3)) {
12259              return -TARGET_EFAULT;
12260          }
12261          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12262                                               &ts, arg4 ? &ts : NULL));
12263          /*
12264           * If the call is interrupted by a signal handler, it fails with
12265           * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12266           * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12267           */
12268          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12269              host_to_target_timespec(arg4, &ts)) {
12270                return -TARGET_EFAULT;
12271          }
12272  
12273          return ret;
12274      }
12275  #endif
12276  #ifdef TARGET_NR_clock_nanosleep_time64
12277      case TARGET_NR_clock_nanosleep_time64:
12278      {
12279          struct timespec ts;
12280  
12281          if (target_to_host_timespec64(&ts, arg3)) {
12282              return -TARGET_EFAULT;
12283          }
12284  
12285          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12286                                               &ts, arg4 ? &ts : NULL));
12287  
12288          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12289              host_to_target_timespec64(arg4, &ts)) {
12290              return -TARGET_EFAULT;
12291          }
12292          return ret;
12293      }
12294  #endif
12295  
12296  #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12297      case TARGET_NR_set_tid_address:
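               /* The kernel only touches this address at thread exit, so the
                * host view of the guest pointer (g2h) can be passed through.  */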
12298          return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12299  #endif
12300  
12301      case TARGET_NR_tkill:
12302          return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12303  
12304      case TARGET_NR_tgkill:
12305          return get_errno(safe_tgkill((int)arg1, (int)arg2,
12306                           target_to_host_signal(arg3)));
12307  
12308  #ifdef TARGET_NR_set_robust_list
12309      case TARGET_NR_set_robust_list:
12310      case TARGET_NR_get_robust_list:
12311          /* The ABI for supporting robust futexes has userspace pass
12312           * the kernel a pointer to a linked list which is updated by
12313           * userspace after the syscall; the list is walked by the kernel
12314           * when the thread exits. Since the linked list in QEMU guest
12315           * memory isn't a valid linked list for the host and we have
12316           * no way to reliably intercept the thread-death event, we can't
12317           * support these. Silently return ENOSYS so that guest userspace
12318           * falls back to a non-robust futex implementation (which should
12319           * be OK except in the corner case of the guest crashing while
12320           * holding a mutex that is shared with another process via
12321           * shared memory).
12322           */
12323          return -TARGET_ENOSYS;
12324  #endif
12325  
12326  #if defined(TARGET_NR_utimensat)
12327      case TARGET_NR_utimensat:
12328          {
12329              struct timespec *tsp, ts[2];
12330              if (!arg3) {
12331                  tsp = NULL;
12332              } else {
12333                  if (target_to_host_timespec(ts, arg3)) {
12334                      return -TARGET_EFAULT;
12335                  }
12336                  if (target_to_host_timespec(ts + 1, arg3 +
12337                                              sizeof(struct target_timespec))) {
12338                      return -TARGET_EFAULT;
12339                  }
12340                  tsp = ts;
12341              }
12342              if (!arg2) {
12343                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12344              } else {
12345                  if (!(p = lock_user_string(arg2))) {
12346                      return -TARGET_EFAULT;
12347                  }
12348                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12349                  unlock_user(p, arg2, 0);
12350              }
12351          }
12352          return ret;
12353  #endif
12354  #ifdef TARGET_NR_utimensat_time64
12355      case TARGET_NR_utimensat_time64:
12356          {
12357              struct timespec *tsp, ts[2];
12358              if (!arg3) {
12359                  tsp = NULL;
12360              } else {
12361                  if (target_to_host_timespec64(ts, arg3)) {
12362                      return -TARGET_EFAULT;
12363                  }
12364                  if (target_to_host_timespec64(ts + 1, arg3 +
12365                                       sizeof(struct target__kernel_timespec))) {
12366                      return -TARGET_EFAULT;
12367                  }
12368                  tsp = ts;
12369              }
12370              if (!arg2) {
12371                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12372              } else {
12373                  p = lock_user_string(arg2);
12374                  if (!p) {
12375                      return -TARGET_EFAULT;
12376                  }
12377                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12378                  unlock_user(p, arg2, 0);
12379              }
12380          }
12381          return ret;
12382  #endif
12383  #ifdef TARGET_NR_futex
12384      case TARGET_NR_futex:
12385          return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12386  #endif
12387  #ifdef TARGET_NR_futex_time64
12388      case TARGET_NR_futex_time64:
12389          return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12390  #endif
12391  #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12392      case TARGET_NR_inotify_init:
12393          ret = get_errno(sys_inotify_init());
12394          if (ret >= 0) {
12395              fd_trans_register(ret, &target_inotify_trans);
12396          }
12397          return ret;
12398  #endif
12399  #ifdef CONFIG_INOTIFY1
12400  #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12401      case TARGET_NR_inotify_init1:
12402          ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12403                                            fcntl_flags_tbl)));
12404          if (ret >= 0) {
12405              fd_trans_register(ret, &target_inotify_trans);
12406          }
12407          return ret;
12408  #endif
12409  #endif
12410  #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12411      case TARGET_NR_inotify_add_watch:
12412          p = lock_user_string(arg2);
12413          ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12414          unlock_user(p, arg2, 0);
12415          return ret;
12416  #endif
12417  #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12418      case TARGET_NR_inotify_rm_watch:
12419          return get_errno(sys_inotify_rm_watch(arg1, arg2));
12420  #endif
12421  
12422  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12423      case TARGET_NR_mq_open:
12424          {
12425              struct mq_attr posix_mq_attr;
12426              struct mq_attr *pposix_mq_attr;
12427              int host_flags;
12428  
12429              host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12430              pposix_mq_attr = NULL;
12431              if (arg4) {
12432                  if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12433                      return -TARGET_EFAULT;
12434                  }
12435                  pposix_mq_attr = &posix_mq_attr;
12436              }
12437              p = lock_user_string(arg1 - 1);
12438              if (!p) {
12439                  return -TARGET_EFAULT;
12440              }
12441              ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12442              unlock_user(p, arg1, 0);
12443          }
12444          return ret;
12445  
12446      case TARGET_NR_mq_unlink:
12447          p = lock_user_string(arg1 - 1);
12448          if (!p) {
12449              return -TARGET_EFAULT;
12450          }
12451          ret = get_errno(mq_unlink(p));
12452          unlock_user(p, arg1, 0);
12453          return ret;
12454  
12455  #ifdef TARGET_NR_mq_timedsend
12456      case TARGET_NR_mq_timedsend:
12457          {
12458              struct timespec ts;
12459  
12460              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12461              if (arg5 != 0) {
12462                  if (target_to_host_timespec(&ts, arg5)) {
12463                      return -TARGET_EFAULT;
12464                  }
12465                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12466                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12467                      return -TARGET_EFAULT;
12468                  }
12469              } else {
12470                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12471              }
12472              unlock_user(p, arg2, arg3);
12473          }
12474          return ret;
12475  #endif
12476  #ifdef TARGET_NR_mq_timedsend_time64
12477      case TARGET_NR_mq_timedsend_time64:
12478          {
12479              struct timespec ts;
12480  
12481              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12482              if (arg5 != 0) {
12483                  if (target_to_host_timespec64(&ts, arg5)) {
12484                      return -TARGET_EFAULT;
12485                  }
12486                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12487                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12488                      return -TARGET_EFAULT;
12489                  }
12490              } else {
12491                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12492              }
12493              unlock_user(p, arg2, arg3);
12494          }
12495          return ret;
12496  #endif
12497  
12498  #ifdef TARGET_NR_mq_timedreceive
12499      case TARGET_NR_mq_timedreceive:
12500          {
12501              struct timespec ts;
12502              unsigned int prio;
12503  
12504              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12505              if (arg5 != 0) {
12506                  if (target_to_host_timespec(&ts, arg5)) {
12507                      return -TARGET_EFAULT;
12508                  }
12509                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12510                                                       &prio, &ts));
12511                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12512                      return -TARGET_EFAULT;
12513                  }
12514              } else {
12515                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12516                                                       &prio, NULL));
12517              }
12518              unlock_user(p, arg2, arg3);
12519              if (arg4 != 0)
12520                  put_user_u32(prio, arg4);
12521          }
12522          return ret;
12523  #endif
12524  #ifdef TARGET_NR_mq_timedreceive_time64
12525      case TARGET_NR_mq_timedreceive_time64:
12526          {
12527              struct timespec ts;
12528              unsigned int prio;
12529  
12530              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12531              if (arg5 != 0) {
12532                  if (target_to_host_timespec64(&ts, arg5)) {
12533                      return -TARGET_EFAULT;
12534                  }
12535                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12536                                                       &prio, &ts));
12537                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12538                      return -TARGET_EFAULT;
12539                  }
12540              } else {
12541                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12542                                                       &prio, NULL));
12543              }
12544              unlock_user(p, arg2, arg3);
12545              if (arg4 != 0) {
12546                  put_user_u32(prio, arg4);
12547              }
12548          }
12549          return ret;
12550  #endif
12551  
12552      /* Not implemented for now... */
12553  /*     case TARGET_NR_mq_notify: */
12554  /*         break; */
12555  
12556      case TARGET_NR_mq_getsetattr:
12557          {
12558              struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12559              ret = 0;
12560              if (arg2 != 0) {
12561                  copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12562                  ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12563                                             &posix_mq_attr_out));
12564              } else if (arg3 != 0) {
12565                  ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12566              }
12567              if (ret == 0 && arg3 != 0) {
12568                  copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12569              }
12570          }
12571          return ret;
12572  #endif
12573  
12574  #ifdef CONFIG_SPLICE
12575  #ifdef TARGET_NR_tee
12576      case TARGET_NR_tee:
12577          {
12578              ret = get_errno(tee(arg1, arg2, arg3, arg4));
12579          }
12580          return ret;
12581  #endif
12582  #ifdef TARGET_NR_splice
12583      case TARGET_NR_splice:
12584          {
12585              loff_t loff_in, loff_out;
12586              loff_t *ploff_in = NULL, *ploff_out = NULL;
12587              if (arg2) {
12588                  if (get_user_u64(loff_in, arg2)) {
12589                      return -TARGET_EFAULT;
12590                  }
12591                  ploff_in = &loff_in;
12592              }
12593              if (arg4) {
12594                  if (get_user_u64(loff_out, arg4)) {
12595                      return -TARGET_EFAULT;
12596                  }
12597                  ploff_out = &loff_out;
12598              }
12599              ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12600              if (arg2) {
12601                  if (put_user_u64(loff_in, arg2)) {
12602                      return -TARGET_EFAULT;
12603                  }
12604              }
12605              if (arg4) {
12606                  if (put_user_u64(loff_out, arg4)) {
12607                      return -TARGET_EFAULT;
12608                  }
12609              }
12610          }
12611          return ret;
12612  #endif
12613  #ifdef TARGET_NR_vmsplice
12614      case TARGET_NR_vmsplice:
12615          {
12616              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12617              if (vec != NULL) {
12618                  ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12619                  unlock_iovec(vec, arg2, arg3, 0);
12620              } else {
12621                  ret = -host_to_target_errno(errno);
12622              }
12623          }
12624          return ret;
12625  #endif
12626  #endif /* CONFIG_SPLICE */
12627  #ifdef CONFIG_EVENTFD
12628  #if defined(TARGET_NR_eventfd)
12629      case TARGET_NR_eventfd:
12630          ret = get_errno(eventfd(arg1, 0));
12631          if (ret >= 0) {
12632              fd_trans_register(ret, &target_eventfd_trans);
12633          }
12634          return ret;
12635  #endif
12636  #if defined(TARGET_NR_eventfd2)
12637      case TARGET_NR_eventfd2:
12638      {
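               /* Strip the target O_NONBLOCK/O_CLOEXEC bits and re-add their
                * host equivalents; the remaining bits pass through unchanged.  */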
12639          int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12640          if (arg2 & TARGET_O_NONBLOCK) {
12641              host_flags |= O_NONBLOCK;
12642          }
12643          if (arg2 & TARGET_O_CLOEXEC) {
12644              host_flags |= O_CLOEXEC;
12645          }
12646          ret = get_errno(eventfd(arg1, host_flags));
12647          if (ret >= 0) {
12648              fd_trans_register(ret, &target_eventfd_trans);
12649          }
12650          return ret;
12651      }
12652  #endif
12653  #endif /* CONFIG_EVENTFD  */
12654  #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12655      case TARGET_NR_fallocate:
12656  #if TARGET_ABI_BITS == 32
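               /* On 32-bit ABIs, offset and len each arrive as a pair of
                * registers; reassemble them into 64-bit values.  */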
12657          ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12658                                    target_offset64(arg5, arg6)));
12659  #else
12660          ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12661  #endif
12662          return ret;
12663  #endif
12664  #if defined(CONFIG_SYNC_FILE_RANGE)
12665  #if defined(TARGET_NR_sync_file_range)
12666      case TARGET_NR_sync_file_range:
12667  #if TARGET_ABI_BITS == 32
12668  #if defined(TARGET_MIPS)
12669          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12670                                          target_offset64(arg5, arg6), arg7));
12671  #else
12672          ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12673                                          target_offset64(arg4, arg5), arg6));
12674  #endif /* !TARGET_MIPS */
12675  #else
12676          ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12677  #endif
12678          return ret;
12679  #endif
12680  #if defined(TARGET_NR_sync_file_range2) || \
12681      defined(TARGET_NR_arm_sync_file_range)
12682  #if defined(TARGET_NR_sync_file_range2)
12683      case TARGET_NR_sync_file_range2:
12684  #endif
12685  #if defined(TARGET_NR_arm_sync_file_range)
12686      case TARGET_NR_arm_sync_file_range:
12687  #endif
12688          /* This is like sync_file_range but the arguments are reordered */
12689  #if TARGET_ABI_BITS == 32
12690          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12691                                          target_offset64(arg5, arg6), arg2));
12692  #else
12693          ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12694  #endif
12695          return ret;
12696  #endif
12697  #endif
12698  #if defined(TARGET_NR_signalfd4)
12699      case TARGET_NR_signalfd4:
12700          return do_signalfd4(arg1, arg2, arg4);
12701  #endif
12702  #if defined(TARGET_NR_signalfd)
12703      case TARGET_NR_signalfd:
12704          return do_signalfd4(arg1, arg2, 0);
12705  #endif
12706  #if defined(CONFIG_EPOLL)
12707  #if defined(TARGET_NR_epoll_create)
12708      case TARGET_NR_epoll_create:
12709          return get_errno(epoll_create(arg1));
12710  #endif
12711  #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12712      case TARGET_NR_epoll_create1:
12713          return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12714  #endif
12715  #if defined(TARGET_NR_epoll_ctl)
12716      case TARGET_NR_epoll_ctl:
12717      {
12718          struct epoll_event ep;
12719          struct epoll_event *epp = 0;
12720          if (arg4) {
12721              if (arg2 != EPOLL_CTL_DEL) {
12722                  struct target_epoll_event *target_ep;
12723                  if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12724                      return -TARGET_EFAULT;
12725                  }
12726                  ep.events = tswap32(target_ep->events);
12727                  /*
12728                   * The epoll_data_t union is just opaque data to the kernel,
12729                   * so we transfer all 64 bits across and need not worry what
12730                   * actual data type it is.
12731                   */
12732                  ep.data.u64 = tswap64(target_ep->data.u64);
12733                  unlock_user_struct(target_ep, arg4, 0);
12734              }
12735              /*
12736               * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12737               * non-null pointer, even though this argument is ignored.
12739               */
12740              epp = &ep;
12741          }
12742          return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12743      }
12744  #endif
12745  
12746  #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12747  #if defined(TARGET_NR_epoll_wait)
12748      case TARGET_NR_epoll_wait:
12749  #endif
12750  #if defined(TARGET_NR_epoll_pwait)
12751      case TARGET_NR_epoll_pwait:
12752  #endif
12753      {
12754          struct target_epoll_event *target_ep;
12755          struct epoll_event *ep;
12756          int epfd = arg1;
12757          int maxevents = arg3;
12758          int timeout = arg4;
12759  
12760          if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12761              return -TARGET_EINVAL;
12762          }
12763  
12764          target_ep = lock_user(VERIFY_WRITE, arg2,
12765                                maxevents * sizeof(struct target_epoll_event), 1);
12766          if (!target_ep) {
12767              return -TARGET_EFAULT;
12768          }
12769  
12770          ep = g_try_new(struct epoll_event, maxevents);
12771          if (!ep) {
12772              unlock_user(target_ep, arg2, 0);
12773              return -TARGET_ENOMEM;
12774          }
12775  
12776          switch (num) {
12777  #if defined(TARGET_NR_epoll_pwait)
12778          case TARGET_NR_epoll_pwait:
12779          {
12780              target_sigset_t *target_set;
12781              sigset_t _set, *set = &_set;
12782  
12783              if (arg5) {
12784                  if (arg6 != sizeof(target_sigset_t)) {
12785                      ret = -TARGET_EINVAL;
12786                      break;
12787                  }
12788  
12789                  target_set = lock_user(VERIFY_READ, arg5,
12790                                         sizeof(target_sigset_t), 1);
12791                  if (!target_set) {
12792                      ret = -TARGET_EFAULT;
12793                      break;
12794                  }
12795                  target_to_host_sigset(set, target_set);
12796                  unlock_user(target_set, arg5, 0);
12797              } else {
12798                  set = NULL;
12799              }
12800  
12801              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12802                                               set, SIGSET_T_SIZE));
12803              break;
12804          }
12805  #endif
12806  #if defined(TARGET_NR_epoll_wait)
12807          case TARGET_NR_epoll_wait:
12808              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12809                                               NULL, 0));
12810              break;
12811  #endif
12812          default:
12813              ret = -TARGET_ENOSYS;
12814          }
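               /* On success, ret is the number of ready events; copy just
                * those entries back to the guest's event array.  */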
12815          if (!is_error(ret)) {
12816              int i;
12817              for (i = 0; i < ret; i++) {
12818                  target_ep[i].events = tswap32(ep[i].events);
12819                  target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12820              }
12821              unlock_user(target_ep, arg2,
12822                          ret * sizeof(struct target_epoll_event));
12823          } else {
12824              unlock_user(target_ep, arg2, 0);
12825          }
12826          g_free(ep);
12827          return ret;
12828      }
12829  #endif
12830  #endif
12831  #ifdef TARGET_NR_prlimit64
12832      case TARGET_NR_prlimit64:
12833      {
12834          /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12835          struct target_rlimit64 *target_rnew, *target_rold;
12836          struct host_rlimit64 rnew, rold, *rnewp = 0;
12837          int resource = target_to_host_resource(arg2);
12838  
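               /* For RLIMIT_AS/DATA/STACK the requested new limit is ignored
                * (rnewp stays NULL), since those limits apply to the whole QEMU
                * process; only the old limit is reported back.  */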
12839          if (arg3 && (resource != RLIMIT_AS &&
12840                       resource != RLIMIT_DATA &&
12841                       resource != RLIMIT_STACK)) {
12842              if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12843                  return -TARGET_EFAULT;
12844              }
12845              rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12846              rnew.rlim_max = tswap64(target_rnew->rlim_max);
12847              unlock_user_struct(target_rnew, arg3, 0);
12848              rnewp = &rnew;
12849          }
12850  
12851          ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12852          if (!is_error(ret) && arg4) {
12853              if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12854                  return -TARGET_EFAULT;
12855              }
12856              target_rold->rlim_cur = tswap64(rold.rlim_cur);
12857              target_rold->rlim_max = tswap64(rold.rlim_max);
12858              unlock_user_struct(target_rold, arg4, 1);
12859          }
12860          return ret;
12861      }
12862  #endif
12863  #ifdef TARGET_NR_gethostname
12864      case TARGET_NR_gethostname:
12865      {
12866          char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12867          if (name) {
12868              ret = get_errno(gethostname(name, arg2));
12869              unlock_user(name, arg1, arg2);
12870          } else {
12871              ret = -TARGET_EFAULT;
12872          }
12873          return ret;
12874      }
12875  #endif
12876  #ifdef TARGET_NR_atomic_cmpxchg_32
12877      case TARGET_NR_atomic_cmpxchg_32:
12878      {
12879          /* should use start_exclusive from main.c */
12880          abi_ulong mem_value;
12881          if (get_user_u32(mem_value, arg6)) {
12882              target_siginfo_t info;
12883              info.si_signo = SIGSEGV;
12884              info.si_errno = 0;
12885              info.si_code = TARGET_SEGV_MAPERR;
12886              info._sifields._sigfault._addr = arg6;
12887              queue_signal((CPUArchState *)cpu_env, info.si_signo,
12888                           QEMU_SI_FAULT, &info);
12889              ret = 0xdeadbeef;
12890  
12891          }
12892          if (mem_value == arg2)
12893              put_user_u32(arg1, arg6);
12894          return mem_value;
12895      }
12896  #endif
12897  #ifdef TARGET_NR_atomic_barrier
12898      case TARGET_NR_atomic_barrier:
12899          /* Like the kernel implementation and the QEMU ARM barrier,
12900             this can be a no-op.  */
12901          return 0;
12902  #endif
12903  
12904  #ifdef TARGET_NR_timer_create
12905      case TARGET_NR_timer_create:
12906      {
12907          /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12908  
12909          struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12910  
12911          int clkid = arg1;
12912          int timer_index = next_free_host_timer();
12913  
12914          if (timer_index < 0) {
12915              ret = -TARGET_EAGAIN;
12916          } else {
12917              timer_t *phtimer = g_posix_timers + timer_index;
12918  
12919              if (arg2) {
12920                  phost_sevp = &host_sevp;
12921                  ret = target_to_host_sigevent(phost_sevp, arg2);
12922                  if (ret != 0) {
12923                      return ret;
12924                  }
12925              }
12926  
12927              ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12928              if (ret) {
12929                  phtimer = NULL;
12930              } else {
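                       /* Hand back a guest timer id that encodes TIMER_MAGIC
                        * plus the slot index; get_timer_id() validates and
                        * strips the magic on later timer_* calls.  */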
12931                  if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12932                      return -TARGET_EFAULT;
12933                  }
12934              }
12935          }
12936          return ret;
12937      }
12938  #endif
12939  
12940  #ifdef TARGET_NR_timer_settime
12941      case TARGET_NR_timer_settime:
12942      {
12943          /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12944           * struct itimerspec *old_value */
12945          target_timer_t timerid = get_timer_id(arg1);
12946  
12947          if (timerid < 0) {
12948              ret = timerid;
12949          } else if (arg3 == 0) {
12950              ret = -TARGET_EINVAL;
12951          } else {
12952              timer_t htimer = g_posix_timers[timerid];
12953              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12954  
12955              if (target_to_host_itimerspec(&hspec_new, arg3)) {
12956                  return -TARGET_EFAULT;
12957              }
12958              ret = get_errno(
12959                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12960              if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12961                  return -TARGET_EFAULT;
12962              }
12963          }
12964          return ret;
12965      }
12966  #endif
12967  
12968  #ifdef TARGET_NR_timer_settime64
12969      case TARGET_NR_timer_settime64:
12970      {
12971          target_timer_t timerid = get_timer_id(arg1);
12972  
12973          if (timerid < 0) {
12974              ret = timerid;
12975          } else if (arg3 == 0) {
12976              ret = -TARGET_EINVAL;
12977          } else {
12978              timer_t htimer = g_posix_timers[timerid];
12979              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12980  
12981              if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12982                  return -TARGET_EFAULT;
12983              }
12984              ret = get_errno(
12985                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12986              if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12987                  return -TARGET_EFAULT;
12988              }
12989          }
12990          return ret;
12991      }
12992  #endif
12993  
12994  #ifdef TARGET_NR_timer_gettime
12995      case TARGET_NR_timer_gettime:
12996      {
12997          /* args: timer_t timerid, struct itimerspec *curr_value */
12998          target_timer_t timerid = get_timer_id(arg1);
12999  
13000          if (timerid < 0) {
13001              ret = timerid;
13002          } else if (!arg2) {
13003              ret = -TARGET_EFAULT;
13004          } else {
13005              timer_t htimer = g_posix_timers[timerid];
13006              struct itimerspec hspec;
13007              ret = get_errno(timer_gettime(htimer, &hspec));
13008  
13009              if (host_to_target_itimerspec(arg2, &hspec)) {
13010                  ret = -TARGET_EFAULT;
13011              }
13012          }
13013          return ret;
13014      }
13015  #endif
13016  
13017  #ifdef TARGET_NR_timer_gettime64
13018      case TARGET_NR_timer_gettime64:
13019      {
13020          /* args: timer_t timerid, struct itimerspec64 *curr_value */
13021          target_timer_t timerid = get_timer_id(arg1);
13022  
13023          if (timerid < 0) {
13024              ret = timerid;
13025          } else if (!arg2) {
13026              ret = -TARGET_EFAULT;
13027          } else {
13028              timer_t htimer = g_posix_timers[timerid];
13029              struct itimerspec hspec;
13030              ret = get_errno(timer_gettime(htimer, &hspec));
13031  
13032              if (host_to_target_itimerspec64(arg2, &hspec)) {
13033                  ret = -TARGET_EFAULT;
13034              }
13035          }
13036          return ret;
13037      }
13038  #endif
13039  
13040  #ifdef TARGET_NR_timer_getoverrun
13041      case TARGET_NR_timer_getoverrun:
13042      {
13043          /* args: timer_t timerid */
13044          target_timer_t timerid = get_timer_id(arg1);
13045  
13046          if (timerid < 0) {
13047              ret = timerid;
13048          } else {
13049              timer_t htimer = g_posix_timers[timerid];
13050              ret = get_errno(timer_getoverrun(htimer));
13051          }
13052          return ret;
13053      }
13054  #endif
13055  
13056  #ifdef TARGET_NR_timer_delete
13057      case TARGET_NR_timer_delete:
13058      {
13059          /* args: timer_t timerid */
13060          target_timer_t timerid = get_timer_id(arg1);
13061  
13062          if (timerid < 0) {
13063              ret = timerid;
13064          } else {
13065              timer_t htimer = g_posix_timers[timerid];
13066              ret = get_errno(timer_delete(htimer));
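                   /* Clear the slot so next_free_host_timer() can reuse it.  */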
13067              g_posix_timers[timerid] = 0;
13068          }
13069          return ret;
13070      }
13071  #endif
13072  
13073  #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13074      case TARGET_NR_timerfd_create:
13075          return get_errno(timerfd_create(arg1,
13076                            target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13077  #endif
13078  
13079  #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13080      case TARGET_NR_timerfd_gettime:
13081          {
13082              struct itimerspec its_curr;
13083  
13084              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13085  
13086              if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13087                  return -TARGET_EFAULT;
13088              }
13089          }
13090          return ret;
13091  #endif
13092  
13093  #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13094      case TARGET_NR_timerfd_gettime64:
13095          {
13096              struct itimerspec its_curr;
13097  
13098              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13099  
13100              if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13101                  return -TARGET_EFAULT;
13102              }
13103          }
13104          return ret;
13105  #endif
13106  
13107  #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13108      case TARGET_NR_timerfd_settime:
13109          {
13110              struct itimerspec its_new, its_old, *p_new;
13111  
13112              if (arg3) {
13113                  if (target_to_host_itimerspec(&its_new, arg3)) {
13114                      return -TARGET_EFAULT;
13115                  }
13116                  p_new = &its_new;
13117              } else {
13118                  p_new = NULL;
13119              }
13120  
13121              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13122  
13123              if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13124                  return -TARGET_EFAULT;
13125              }
13126          }
13127          return ret;
13128  #endif
13129  
13130  #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13131      case TARGET_NR_timerfd_settime64:
13132          {
13133              struct itimerspec its_new, its_old, *p_new;
13134  
13135              if (arg3) {
13136                  if (target_to_host_itimerspec64(&its_new, arg3)) {
13137                      return -TARGET_EFAULT;
13138                  }
13139                  p_new = &its_new;
13140              } else {
13141                  p_new = NULL;
13142              }
13143  
13144              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13145  
13146              if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13147                  return -TARGET_EFAULT;
13148              }
13149          }
13150          return ret;
13151  #endif
13152  
13153  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13154      case TARGET_NR_ioprio_get:
13155          return get_errno(ioprio_get(arg1, arg2));
13156  #endif
13157  
13158  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13159      case TARGET_NR_ioprio_set:
13160          return get_errno(ioprio_set(arg1, arg2, arg3));
13161  #endif
13162  
13163  #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13164      case TARGET_NR_setns:
13165          return get_errno(setns(arg1, arg2));
13166  #endif
13167  #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13168      case TARGET_NR_unshare:
13169          return get_errno(unshare(arg1));
13170  #endif
13171  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
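           /*
            * kcmp() compares whether two processes share a given kernel
            * resource; all of its arguments are plain integers, so no
            * translation is needed.
            */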
13172      case TARGET_NR_kcmp:
13173          return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13174  #endif
13175  #ifdef TARGET_NR_swapcontext
13176      case TARGET_NR_swapcontext:
13177          /* PowerPC specific.  */
13178          return do_swapcontext(cpu_env, arg1, arg2, arg3);
13179  #endif
13180  #ifdef TARGET_NR_memfd_create
13181      case TARGET_NR_memfd_create:
13182          p = lock_user_string(arg1);
13183          if (!p) {
13184              return -TARGET_EFAULT;
13185          }
13186          ret = get_errno(memfd_create(p, arg2));
13187          fd_trans_unregister(ret);  /* drop any stale fd translator left for this fd number */
13188          unlock_user(p, arg1, 0);
13189          return ret;
13190  #endif
13191  #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13192      case TARGET_NR_membarrier:
13193          return get_errno(membarrier(arg1, arg2));
13194  #endif
13195  
13196  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13197      case TARGET_NR_copy_file_range:
13198          {
13199              loff_t inoff, outoff;
13200              loff_t *pinoff = NULL, *poutoff = NULL;
13201  
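                   /*
                    * arg2 and arg4 are optional pointers to 64-bit file offsets.
                    * Read them in here, and write the updated values back only
                    * if the syscall actually copied any data.
                    */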
13202              if (arg2) {
13203                  if (get_user_u64(inoff, arg2)) {
13204                      return -TARGET_EFAULT;
13205                  }
13206                  pinoff = &inoff;
13207              }
13208              if (arg4) {
13209                  if (get_user_u64(outoff, arg4)) {
13210                      return -TARGET_EFAULT;
13211                  }
13212                  poutoff = &outoff;
13213              }
13214              /* Do not sign-extend the count parameter. */
13215              ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13216                                                   (abi_ulong)arg5, arg6));
13217              if (!is_error(ret) && ret > 0) {
13218                  if (arg2) {
13219                      if (put_user_u64(inoff, arg2)) {
13220                          return -TARGET_EFAULT;
13221                      }
13222                  }
13223                  if (arg4) {
13224                      if (put_user_u64(outoff, arg4)) {
13225                          return -TARGET_EFAULT;
13226                      }
13227                  }
13228              }
13229          }
13230          return ret;
13231  #endif
13232  
13233  #if defined(TARGET_NR_pivot_root)
13234      case TARGET_NR_pivot_root:
13235          {
13236              void *p2;
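                   /*
                    * Both pathnames must be brought in from guest memory; if
                    * either pointer is bad, fail with EFAULT before calling the
                    * host.
                    */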
13237              p = lock_user_string(arg1); /* new_root */
13238              p2 = lock_user_string(arg2); /* put_old */
13239              if (!p || !p2) {
13240                  ret = -TARGET_EFAULT;
13241              } else {
13242                  ret = get_errno(pivot_root(p, p2));
13243              }
13244              unlock_user(p2, arg2, 0);
13245              unlock_user(p, arg1, 0);
13246          }
13247          return ret;
13248  #endif
13249  
13250      default:
13251          qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13252          return -TARGET_ENOSYS;
13253      }
13254      return ret;
13255  }
13256  
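       /*
        * Top-level syscall entry point, called from the per-architecture cpu
        * main loops: record and (with -strace) log the syscall, dispatch it to
        * do_syscall1(), then record and log the result.
        */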
13257  abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13258                      abi_long arg2, abi_long arg3, abi_long arg4,
13259                      abi_long arg5, abi_long arg6, abi_long arg7,
13260                      abi_long arg8)
13261  {
13262      CPUState *cpu = env_cpu(cpu_env);
13263      abi_long ret;
13264  
13265  #ifdef DEBUG_ERESTARTSYS
13266      /* Debug-only code for exercising the syscall-restart code paths
13267       * in the per-architecture cpu main loops: restart every syscall
13268       * the guest makes once before letting it through.
13269       */
13270      {
13271          static bool flag;
13272          flag = !flag;
13273          if (flag) {
13274              return -TARGET_ERESTARTSYS;
13275          }
13276      }
13277  #endif
13278  
13279      record_syscall_start(cpu, num, arg1,
13280                           arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13281  
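           /* With -strace, print the syscall and its arguments before it runs. */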
13282      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13283          print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13284      }
13285  
13286      ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13287                        arg5, arg6, arg7, arg8);
13288  
13289      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13290          print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13291                            arg3, arg4, arg5, arg6);
13292      }
13293  
13294      record_syscall_return(cpu, num, ret);
13295      return ret;
13296  }
13297