xref: /openbmc/qemu/linux-user/syscall.c (revision 4f8f199fa569492bb07efee02489f521629d275d)
1  /*
2   *  Linux syscalls
3   *
4   *  Copyright (c) 2003 Fabrice Bellard
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
8   *  the Free Software Foundation; either version 2 of the License, or
9   *  (at your option) any later version.
10   *
11   *  This program is distributed in the hope that it will be useful,
12   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   *  GNU General Public License for more details.
15   *
16   *  You should have received a copy of the GNU General Public License
17   *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18   */
19  #define _ATFILE_SOURCE
20  #include "qemu/osdep.h"
21  #include "qemu/cutils.h"
22  #include "qemu/path.h"
23  #include "qemu/memfd.h"
24  #include "qemu/queue.h"
25  #include "qemu/plugin.h"
26  #include "tcg/startup.h"
27  #include "target_mman.h"
28  #include <elf.h>
29  #include <endian.h>
30  #include <grp.h>
31  #include <sys/ipc.h>
32  #include <sys/msg.h>
33  #include <sys/wait.h>
34  #include <sys/mount.h>
35  #include <sys/file.h>
36  #include <sys/fsuid.h>
37  #include <sys/personality.h>
38  #include <sys/prctl.h>
39  #include <sys/resource.h>
40  #include <sys/swap.h>
41  #include <linux/capability.h>
42  #include <sched.h>
43  #include <sys/timex.h>
44  #include <sys/socket.h>
45  #include <linux/sockios.h>
46  #include <sys/un.h>
47  #include <sys/uio.h>
48  #include <poll.h>
49  #include <sys/times.h>
50  #include <sys/shm.h>
51  #include <sys/sem.h>
52  #include <sys/statfs.h>
53  #include <utime.h>
54  #include <sys/sysinfo.h>
55  #include <sys/signalfd.h>
56  //#include <sys/user.h>
57  #include <netinet/in.h>
58  #include <netinet/ip.h>
59  #include <netinet/tcp.h>
60  #include <netinet/udp.h>
61  #include <linux/wireless.h>
62  #include <linux/icmp.h>
63  #include <linux/icmpv6.h>
64  #include <linux/if_tun.h>
65  #include <linux/in6.h>
66  #include <linux/errqueue.h>
67  #include <linux/random.h>
68  #ifdef CONFIG_TIMERFD
69  #include <sys/timerfd.h>
70  #endif
71  #ifdef CONFIG_EVENTFD
72  #include <sys/eventfd.h>
73  #endif
74  #ifdef CONFIG_EPOLL
75  #include <sys/epoll.h>
76  #endif
77  #ifdef CONFIG_ATTR
78  #include "qemu/xattr.h"
79  #endif
80  #ifdef CONFIG_SENDFILE
81  #include <sys/sendfile.h>
82  #endif
83  #ifdef HAVE_SYS_KCOV_H
84  #include <sys/kcov.h>
85  #endif
86  
87  #define termios host_termios
88  #define winsize host_winsize
89  #define termio host_termio
90  #define sgttyb host_sgttyb /* same as target */
91  #define tchars host_tchars /* same as target */
92  #define ltchars host_ltchars /* same as target */
93  
94  #include <linux/termios.h>
95  #include <linux/unistd.h>
96  #include <linux/cdrom.h>
97  #include <linux/hdreg.h>
98  #include <linux/soundcard.h>
99  #include <linux/kd.h>
100  #include <linux/mtio.h>
101  #include <linux/fs.h>
102  #include <linux/fd.h>
103  #if defined(CONFIG_FIEMAP)
104  #include <linux/fiemap.h>
105  #endif
106  #include <linux/fb.h>
107  #if defined(CONFIG_USBFS)
108  #include <linux/usbdevice_fs.h>
109  #include <linux/usb/ch9.h>
110  #endif
111  #include <linux/vt.h>
112  #include <linux/dm-ioctl.h>
113  #include <linux/reboot.h>
114  #include <linux/route.h>
115  #include <linux/filter.h>
116  #include <linux/blkpg.h>
117  #include <netpacket/packet.h>
118  #include <linux/netlink.h>
119  #include <linux/if_alg.h>
120  #include <linux/rtc.h>
121  #include <sound/asound.h>
122  #ifdef HAVE_BTRFS_H
123  #include <linux/btrfs.h>
124  #endif
125  #ifdef HAVE_DRM_H
126  #include <libdrm/drm.h>
127  #include <libdrm/i915_drm.h>
128  #endif
129  #include "linux_loop.h"
130  #include "uname.h"
131  
132  #include "qemu.h"
133  #include "user-internals.h"
134  #include "strace.h"
135  #include "signal-common.h"
136  #include "loader.h"
137  #include "user-mmap.h"
138  #include "user/safe-syscall.h"
139  #include "qemu/guest-random.h"
140  #include "qemu/selfmap.h"
141  #include "user/syscall-trace.h"
142  #include "special-errno.h"
143  #include "qapi/error.h"
144  #include "fd-trans.h"
145  #include "cpu_loop-common.h"
146  
147  #ifndef CLONE_IO
148  #define CLONE_IO                0x80000000      /* Clone io context */
149  #endif
150  
151  /* We can't directly call the host clone syscall, because this will
152   * badly confuse libc (breaking mutexes, for example). So we must
153   * divide clone flags into:
154   *  * flag combinations that look like pthread_create()
155   *  * flag combinations that look like fork()
156   *  * flags we can implement within QEMU itself
157   *  * flags we can't support and will return an error for
158   */
159  /* For thread creation, all these flags must be present; for
160   * fork, none must be present.
161   */
162  #define CLONE_THREAD_FLAGS                              \
163      (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164       CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165  
166  /* These flags are ignored:
167   * CLONE_DETACHED is now ignored by the kernel;
168   * CLONE_IO is just an optimisation hint to the I/O scheduler
169   */
170  #define CLONE_IGNORED_FLAGS                     \
171      (CLONE_DETACHED | CLONE_IO)
172  
173  #ifndef CLONE_PIDFD
174  # define CLONE_PIDFD 0x00001000
175  #endif
176  
177  /* Flags for fork which we can implement within QEMU itself */
178  #define CLONE_OPTIONAL_FORK_FLAGS               \
179      (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181  
182  /* Flags for thread creation which we can implement within QEMU itself */
183  #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184      (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186  
187  #define CLONE_INVALID_FORK_FLAGS                                        \
188      (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189  
190  #define CLONE_INVALID_THREAD_FLAGS                                      \
191      (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192         CLONE_IGNORED_FLAGS))
193  
194  /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195   * have almost all been allocated. We cannot support any of
196   * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197   * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198   * The checks against the invalid thread masks above will catch these.
199   * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200   */
201  
202  /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203   * once. This exercises the codepaths for restart.
204   */
205  //#define DEBUG_ERESTARTSYS
206  
207  //#include <linux/msdos_fs.h>
208  #define VFAT_IOCTL_READDIR_BOTH \
209      _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210  #define VFAT_IOCTL_READDIR_SHORT \
211      _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212  
213  #undef _syscall0
214  #undef _syscall1
215  #undef _syscall2
216  #undef _syscall3
217  #undef _syscall4
218  #undef _syscall5
219  #undef _syscall6
220  
221  #define _syscall0(type,name)		\
222  static type name (void)			\
223  {					\
224  	return syscall(__NR_##name);	\
225  }
226  
227  #define _syscall1(type,name,type1,arg1)		\
228  static type name (type1 arg1)			\
229  {						\
230  	return syscall(__NR_##name, arg1);	\
231  }
232  
233  #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234  static type name (type1 arg1,type2 arg2)		\
235  {							\
236  	return syscall(__NR_##name, arg1, arg2);	\
237  }
238  
239  #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240  static type name (type1 arg1,type2 arg2,type3 arg3)		\
241  {								\
242  	return syscall(__NR_##name, arg1, arg2, arg3);		\
243  }
244  
245  #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247  {										\
248  	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249  }
250  
251  #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252  		  type5,arg5)							\
253  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254  {										\
255  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256  }
257  
258  
259  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260  		  type5,arg5,type6,arg6)					\
261  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                    type6 arg6)							\
263  {										\
264  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265  }
266  
267  
268  #define __NR_sys_uname __NR_uname
269  #define __NR_sys_getcwd1 __NR_getcwd
270  #define __NR_sys_getdents __NR_getdents
271  #define __NR_sys_getdents64 __NR_getdents64
272  #define __NR_sys_getpriority __NR_getpriority
273  #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274  #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275  #define __NR_sys_syslog __NR_syslog
276  #if defined(__NR_futex)
277  # define __NR_sys_futex __NR_futex
278  #endif
279  #if defined(__NR_futex_time64)
280  # define __NR_sys_futex_time64 __NR_futex_time64
281  #endif
282  #define __NR_sys_statx __NR_statx
283  
284  #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285  #define __NR__llseek __NR_lseek
286  #endif
287  
288  /* Newer kernel ports have llseek() instead of _llseek() */
289  #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290  #define TARGET_NR__llseek TARGET_NR_llseek
291  #endif
292  
293  /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294  #ifndef TARGET_O_NONBLOCK_MASK
295  #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296  #endif
297  
298  #define __NR_sys_gettid __NR_gettid
299  _syscall0(int, sys_gettid)
300  
301  /* For the 64-bit guest on 32-bit host case we must emulate
302   * getdents using getdents64, because otherwise the host
303   * might hand us back more dirent records than we can fit
304   * into the guest buffer after structure format conversion.
305   * Otherwise we emulate getdents with getdents if the host has it.
306   */
307  #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308  #define EMULATE_GETDENTS_WITH_GETDENTS
309  #endif
310  
311  #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312  _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313  #endif
314  #if (defined(TARGET_NR_getdents) && \
315        !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316      (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317  _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318  #endif
319  #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320  _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321            loff_t *, res, unsigned int, wh);
322  #endif
323  _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324  _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325            siginfo_t *, uinfo)
326  _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327  #ifdef __NR_exit_group
328  _syscall1(int,exit_group,int,error_code)
329  #endif
330  #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331  #define __NR_sys_close_range __NR_close_range
332  _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333  #ifndef CLOSE_RANGE_CLOEXEC
334  #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335  #endif
336  #endif
337  #if defined(__NR_futex)
338  _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339            const struct timespec *,timeout,int *,uaddr2,int,val3)
340  #endif
341  #if defined(__NR_futex_time64)
342  _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343            const struct timespec *,timeout,int *,uaddr2,int,val3)
344  #endif
345  #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346  _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347  #endif
348  #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349  _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                               unsigned int, flags);
351  #endif
352  #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353  _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354  #endif
355  #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356  _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357            unsigned long *, user_mask_ptr);
358  #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359  _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360            unsigned long *, user_mask_ptr);
361  /* sched_attr is not defined in glibc */
362  struct sched_attr {
363      uint32_t size;
364      uint32_t sched_policy;
365      uint64_t sched_flags;
366      int32_t sched_nice;
367      uint32_t sched_priority;
368      uint64_t sched_runtime;
369      uint64_t sched_deadline;
370      uint64_t sched_period;
371      uint32_t sched_util_min;
372      uint32_t sched_util_max;
373  };
374  #define __NR_sys_sched_getattr __NR_sched_getattr
375  _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376            unsigned int, size, unsigned int, flags);
377  #define __NR_sys_sched_setattr __NR_sched_setattr
378  _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379            unsigned int, flags);
380  #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381  _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382  #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383  _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384            const struct sched_param *, param);
385  #define __NR_sys_sched_getparam __NR_sched_getparam
386  _syscall2(int, sys_sched_getparam, pid_t, pid,
387            struct sched_param *, param);
388  #define __NR_sys_sched_setparam __NR_sched_setparam
389  _syscall2(int, sys_sched_setparam, pid_t, pid,
390            const struct sched_param *, param);
391  #define __NR_sys_getcpu __NR_getcpu
392  _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393  _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394            void *, arg);
395  _syscall2(int, capget, struct __user_cap_header_struct *, header,
396            struct __user_cap_data_struct *, data);
397  _syscall2(int, capset, struct __user_cap_header_struct *, header,
398            struct __user_cap_data_struct *, data);
399  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400  _syscall2(int, ioprio_get, int, which, int, who)
401  #endif
402  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403  _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404  #endif
405  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406  _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407  #endif
408  
409  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410  _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411            unsigned long, idx1, unsigned long, idx2)
412  #endif
413  
414  /*
415   * It is assumed that struct statx is architecture independent.
416   */
417  #if defined(TARGET_NR_statx) && defined(__NR_statx)
418  _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419            unsigned int, mask, struct target_statx *, statxbuf)
420  #endif
421  #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422  _syscall2(int, membarrier, int, cmd, int, flags)
423  #endif
424  
425  static const bitmask_transtbl fcntl_flags_tbl[] = {
426    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439  #if defined(O_DIRECT)
440    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441  #endif
442  #if defined(O_NOATIME)
443    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444  #endif
445  #if defined(O_CLOEXEC)
446    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447  #endif
448  #if defined(O_PATH)
449    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450  #endif
451  #if defined(O_TMPFILE)
452    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453  #endif
454    /* Don't terminate the list prematurely on 64-bit host+guest.  */
455  #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457  #endif
458  };
459  
460  _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461  
462  #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463  #if defined(__NR_utimensat)
464  #define __NR_sys_utimensat __NR_utimensat
465  _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466            const struct timespec *,tsp,int,flags)
467  #else
468  static int sys_utimensat(int dirfd, const char *pathname,
469                           const struct timespec times[2], int flags)
470  {
471      errno = ENOSYS;
472      return -1;
473  }
474  #endif
475  #endif /* TARGET_NR_utimensat */
476  
477  #ifdef TARGET_NR_renameat2
478  #if defined(__NR_renameat2)
479  #define __NR_sys_renameat2 __NR_renameat2
480  _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481            const char *, new, unsigned int, flags)
482  #else
483  static int sys_renameat2(int oldfd, const char *old,
484                           int newfd, const char *new, int flags)
485  {
486      if (flags == 0) {
487          return renameat(oldfd, old, newfd, new);
488      }
489      errno = ENOSYS;
490      return -1;
491  }
492  #endif
493  #endif /* TARGET_NR_renameat2 */
494  
495  #ifdef CONFIG_INOTIFY
496  #include <sys/inotify.h>
497  #else
498  /* Userspace can usually survive runtime without inotify */
499  #undef TARGET_NR_inotify_init
500  #undef TARGET_NR_inotify_init1
501  #undef TARGET_NR_inotify_add_watch
502  #undef TARGET_NR_inotify_rm_watch
503  #endif /* CONFIG_INOTIFY  */
504  
505  #if defined(TARGET_NR_prlimit64)
506  #ifndef __NR_prlimit64
507  # define __NR_prlimit64 -1
508  #endif
509  #define __NR_sys_prlimit64 __NR_prlimit64
510  /* The glibc rlimit structure may not be that used by the underlying syscall */
511  struct host_rlimit64 {
512      uint64_t rlim_cur;
513      uint64_t rlim_max;
514  };
515  _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516            const struct host_rlimit64 *, new_limit,
517            struct host_rlimit64 *, old_limit)
518  #endif
519  
520  
521  #if defined(TARGET_NR_timer_create)
522  /* Maximum of 32 active POSIX timers allowed at any one time. */
523  #define GUEST_TIMER_MAX 32
524  static timer_t g_posix_timers[GUEST_TIMER_MAX];
525  static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526  
527  static inline int next_free_host_timer(void)
528  {
529      int k;
530      for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531          if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532              return k;
533          }
534      }
535      return -1;
536  }
537  
538  static inline void free_host_timer_slot(int id)
539  {
540      qatomic_store_release(g_posix_timer_allocated + id, 0);
541  }
542  #endif
543  
544  static inline int host_to_target_errno(int host_errno)
545  {
546      switch (host_errno) {
547  #define E(X)  case X: return TARGET_##X;
548  #include "errnos.c.inc"
549  #undef E
550      default:
551          return host_errno;
552      }
553  }
554  
555  static inline int target_to_host_errno(int target_errno)
556  {
557      switch (target_errno) {
558  #define E(X)  case TARGET_##X: return X;
559  #include "errnos.c.inc"
560  #undef E
561      default:
562          return target_errno;
563      }
564  }
565  
566  abi_long get_errno(abi_long ret)
567  {
568      if (ret == -1)
569          return -host_to_target_errno(errno);
570      else
571          return ret;
572  }
573  
574  const char *target_strerror(int err)
575  {
576      if (err == QEMU_ERESTARTSYS) {
577          return "To be restarted";
578      }
579      if (err == QEMU_ESIGRETURN) {
580          return "Successful exit from sigreturn";
581      }
582  
583      return strerror(target_to_host_errno(err));
584  }
585  
586  static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
587  {
588      int i;
589      uint8_t b;
590      if (usize <= ksize) {
591          return 1;
592      }
593      for (i = ksize; i < usize; i++) {
594          if (get_user_u8(b, addr + i)) {
595              return -TARGET_EFAULT;
596          }
597          if (b != 0) {
598              return 0;
599          }
600      }
601      return 1;
602  }
603  
604  #define safe_syscall0(type, name) \
605  static type safe_##name(void) \
606  { \
607      return safe_syscall(__NR_##name); \
608  }
609  
610  #define safe_syscall1(type, name, type1, arg1) \
611  static type safe_##name(type1 arg1) \
612  { \
613      return safe_syscall(__NR_##name, arg1); \
614  }
615  
616  #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
617  static type safe_##name(type1 arg1, type2 arg2) \
618  { \
619      return safe_syscall(__NR_##name, arg1, arg2); \
620  }
621  
622  #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
623  static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
624  { \
625      return safe_syscall(__NR_##name, arg1, arg2, arg3); \
626  }
627  
628  #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
629      type4, arg4) \
630  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
631  { \
632      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
633  }
634  
635  #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
636      type4, arg4, type5, arg5) \
637  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
638      type5 arg5) \
639  { \
640      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
641  }
642  
643  #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
644      type4, arg4, type5, arg5, type6, arg6) \
645  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
646      type5 arg5, type6 arg6) \
647  { \
648      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
649  }
650  
651  safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
652  safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
653  safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
654                int, flags, mode_t, mode)
655  #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
656  safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
657                struct rusage *, rusage)
658  #endif
659  safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
660                int, options, struct rusage *, rusage)
661  safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
662  safe_syscall5(int, execveat, int, dirfd, const char *, filename,
663                char **, argv, char **, envp, int, flags)
664  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
665      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
666  safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
667                fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
668  #endif
669  #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
670  safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
671                struct timespec *, tsp, const sigset_t *, sigmask,
672                size_t, sigsetsize)
673  #endif
674  safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
675                int, maxevents, int, timeout, const sigset_t *, sigmask,
676                size_t, sigsetsize)
677  #if defined(__NR_futex)
678  safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
679                const struct timespec *,timeout,int *,uaddr2,int,val3)
680  #endif
681  #if defined(__NR_futex_time64)
682  safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
683                const struct timespec *,timeout,int *,uaddr2,int,val3)
684  #endif
685  safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
686  safe_syscall2(int, kill, pid_t, pid, int, sig)
687  safe_syscall2(int, tkill, int, tid, int, sig)
688  safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
689  safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
690  safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
691  safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
692                unsigned long, pos_l, unsigned long, pos_h)
693  safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
694                unsigned long, pos_l, unsigned long, pos_h)
695  safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
696                socklen_t, addrlen)
697  safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
698                int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
699  safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
700                int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
701  safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
702  safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
703  safe_syscall2(int, flock, int, fd, int, operation)
704  #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
705  safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
706                const struct timespec *, uts, size_t, sigsetsize)
707  #endif
708  safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
709                int, flags)
710  #if defined(TARGET_NR_nanosleep)
711  safe_syscall2(int, nanosleep, const struct timespec *, req,
712                struct timespec *, rem)
713  #endif
714  #if defined(TARGET_NR_clock_nanosleep) || \
715      defined(TARGET_NR_clock_nanosleep_time64)
716  safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
717                const struct timespec *, req, struct timespec *, rem)
718  #endif
719  #ifdef __NR_ipc
720  #ifdef __s390x__
721  safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
722                void *, ptr)
723  #else
724  safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
725                void *, ptr, long, fifth)
726  #endif
727  #endif
728  #ifdef __NR_msgsnd
729  safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
730                int, flags)
731  #endif
732  #ifdef __NR_msgrcv
733  safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
734                long, msgtype, int, flags)
735  #endif
736  #ifdef __NR_semtimedop
737  safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
738                unsigned, nsops, const struct timespec *, timeout)
739  #endif
740  #if defined(TARGET_NR_mq_timedsend) || \
741      defined(TARGET_NR_mq_timedsend_time64)
742  safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
743                size_t, len, unsigned, prio, const struct timespec *, timeout)
744  #endif
745  #if defined(TARGET_NR_mq_timedreceive) || \
746      defined(TARGET_NR_mq_timedreceive_time64)
747  safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
748                size_t, len, unsigned *, prio, const struct timespec *, timeout)
749  #endif
750  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
751  safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
752                int, outfd, loff_t *, poutoff, size_t, length,
753                unsigned int, flags)
754  #endif
755  
756  /* We do ioctl like this rather than via safe_syscall3 to preserve the
757   * "third argument might be integer or pointer or not present" behaviour of
758   * the libc function.
759   */
760  #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
761  /* Similarly for fcntl. Note that callers must always:
762   *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
763   *  use the flock64 struct rather than unsuffixed flock
764   * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
765   */
766  #ifdef __NR_fcntl64
767  #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
768  #else
769  #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
770  #endif
771  
772  static inline int host_to_target_sock_type(int host_type)
773  {
774      int target_type;
775  
776      switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
777      case SOCK_DGRAM:
778          target_type = TARGET_SOCK_DGRAM;
779          break;
780      case SOCK_STREAM:
781          target_type = TARGET_SOCK_STREAM;
782          break;
783      default:
784          target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
785          break;
786      }
787  
788  #if defined(SOCK_CLOEXEC)
789      if (host_type & SOCK_CLOEXEC) {
790          target_type |= TARGET_SOCK_CLOEXEC;
791      }
792  #endif
793  
794  #if defined(SOCK_NONBLOCK)
795      if (host_type & SOCK_NONBLOCK) {
796          target_type |= TARGET_SOCK_NONBLOCK;
797      }
798  #endif
799  
800      return target_type;
801  }
802  
803  static abi_ulong target_brk, initial_target_brk;
804  
805  void target_set_brk(abi_ulong new_brk)
806  {
807      target_brk = TARGET_PAGE_ALIGN(new_brk);
808      initial_target_brk = target_brk;
809  }
810  
811  /* do_brk() must return target values and target errnos. */
812  abi_long do_brk(abi_ulong brk_val)
813  {
814      abi_long mapped_addr;
815      abi_ulong new_brk;
816      abi_ulong old_brk;
817  
818      /* brk pointers are always untagged */
819  
820      /* do not allow to shrink below initial brk value */
821      if (brk_val < initial_target_brk) {
822          return target_brk;
823      }
824  
825      new_brk = TARGET_PAGE_ALIGN(brk_val);
826      old_brk = TARGET_PAGE_ALIGN(target_brk);
827  
828      /* new and old target_brk might be on the same page */
829      if (new_brk == old_brk) {
830          target_brk = brk_val;
831          return target_brk;
832      }
833  
834      /* Release heap if necessary */
835      if (new_brk < old_brk) {
836          target_munmap(new_brk, old_brk - new_brk);
837  
838          target_brk = brk_val;
839          return target_brk;
840      }
841  
842      mapped_addr = target_mmap(old_brk, new_brk - old_brk,
843                                PROT_READ | PROT_WRITE,
844                                MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
845                                -1, 0);
846  
847      if (mapped_addr == old_brk) {
848          target_brk = brk_val;
849          return target_brk;
850      }
851  
852  #if defined(TARGET_ALPHA)
853      /* We (partially) emulate OSF/1 on Alpha, which requires we
854         return a proper errno, not an unchanged brk value.  */
855      return -TARGET_ENOMEM;
856  #endif
857      /* For everything else, return the previous break. */
858      return target_brk;
859  }
860  
861  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
862      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
863  static inline abi_long copy_from_user_fdset(fd_set *fds,
864                                              abi_ulong target_fds_addr,
865                                              int n)
866  {
867      int i, nw, j, k;
868      abi_ulong b, *target_fds;
869  
870      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
871      if (!(target_fds = lock_user(VERIFY_READ,
872                                   target_fds_addr,
873                                   sizeof(abi_ulong) * nw,
874                                   1)))
875          return -TARGET_EFAULT;
876  
877      FD_ZERO(fds);
878      k = 0;
879      for (i = 0; i < nw; i++) {
880          /* grab the abi_ulong */
881          __get_user(b, &target_fds[i]);
882          for (j = 0; j < TARGET_ABI_BITS; j++) {
883              /* check the bit inside the abi_ulong */
884              if ((b >> j) & 1)
885                  FD_SET(k, fds);
886              k++;
887          }
888      }
889  
890      unlock_user(target_fds, target_fds_addr, 0);
891  
892      return 0;
893  }
894  
895  static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
896                                                   abi_ulong target_fds_addr,
897                                                   int n)
898  {
899      if (target_fds_addr) {
900          if (copy_from_user_fdset(fds, target_fds_addr, n))
901              return -TARGET_EFAULT;
902          *fds_ptr = fds;
903      } else {
904          *fds_ptr = NULL;
905      }
906      return 0;
907  }
908  
909  static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
910                                            const fd_set *fds,
911                                            int n)
912  {
913      int i, nw, j, k;
914      abi_long v;
915      abi_ulong *target_fds;
916  
917      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
918      if (!(target_fds = lock_user(VERIFY_WRITE,
919                                   target_fds_addr,
920                                   sizeof(abi_ulong) * nw,
921                                   0)))
922          return -TARGET_EFAULT;
923  
924      k = 0;
925      for (i = 0; i < nw; i++) {
926          v = 0;
927          for (j = 0; j < TARGET_ABI_BITS; j++) {
928              v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
929              k++;
930          }
931          __put_user(v, &target_fds[i]);
932      }
933  
934      unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
935  
936      return 0;
937  }
938  #endif
939  
940  #if defined(__alpha__)
941  #define HOST_HZ 1024
942  #else
943  #define HOST_HZ 100
944  #endif
945  
946  static inline abi_long host_to_target_clock_t(long ticks)
947  {
948  #if HOST_HZ == TARGET_HZ
949      return ticks;
950  #else
951      return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
952  #endif
953  }
954  
955  static inline abi_long host_to_target_rusage(abi_ulong target_addr,
956                                               const struct rusage *rusage)
957  {
958      struct target_rusage *target_rusage;
959  
960      if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
961          return -TARGET_EFAULT;
962      target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
963      target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
964      target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
965      target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
966      target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
967      target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
968      target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
969      target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
970      target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
971      target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
972      target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
973      target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
974      target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
975      target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
976      target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
977      target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
978      target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
979      target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
980      unlock_user_struct(target_rusage, target_addr, 1);
981  
982      return 0;
983  }
984  
985  #ifdef TARGET_NR_setrlimit
986  static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
987  {
988      abi_ulong target_rlim_swap;
989      rlim_t result;
990  
991      target_rlim_swap = tswapal(target_rlim);
992      if (target_rlim_swap == TARGET_RLIM_INFINITY)
993          return RLIM_INFINITY;
994  
995      result = target_rlim_swap;
996      if (target_rlim_swap != (rlim_t)result)
997          return RLIM_INFINITY;
998  
999      return result;
1000  }
1001  #endif
1002  
1003  #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1004  static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1005  {
1006      abi_ulong target_rlim_swap;
1007      abi_ulong result;
1008  
1009      if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1010          target_rlim_swap = TARGET_RLIM_INFINITY;
1011      else
1012          target_rlim_swap = rlim;
1013      result = tswapal(target_rlim_swap);
1014  
1015      return result;
1016  }
1017  #endif
1018  
1019  static inline int target_to_host_resource(int code)
1020  {
1021      switch (code) {
1022      case TARGET_RLIMIT_AS:
1023          return RLIMIT_AS;
1024      case TARGET_RLIMIT_CORE:
1025          return RLIMIT_CORE;
1026      case TARGET_RLIMIT_CPU:
1027          return RLIMIT_CPU;
1028      case TARGET_RLIMIT_DATA:
1029          return RLIMIT_DATA;
1030      case TARGET_RLIMIT_FSIZE:
1031          return RLIMIT_FSIZE;
1032      case TARGET_RLIMIT_LOCKS:
1033          return RLIMIT_LOCKS;
1034      case TARGET_RLIMIT_MEMLOCK:
1035          return RLIMIT_MEMLOCK;
1036      case TARGET_RLIMIT_MSGQUEUE:
1037          return RLIMIT_MSGQUEUE;
1038      case TARGET_RLIMIT_NICE:
1039          return RLIMIT_NICE;
1040      case TARGET_RLIMIT_NOFILE:
1041          return RLIMIT_NOFILE;
1042      case TARGET_RLIMIT_NPROC:
1043          return RLIMIT_NPROC;
1044      case TARGET_RLIMIT_RSS:
1045          return RLIMIT_RSS;
1046      case TARGET_RLIMIT_RTPRIO:
1047          return RLIMIT_RTPRIO;
1048  #ifdef RLIMIT_RTTIME
1049      case TARGET_RLIMIT_RTTIME:
1050          return RLIMIT_RTTIME;
1051  #endif
1052      case TARGET_RLIMIT_SIGPENDING:
1053          return RLIMIT_SIGPENDING;
1054      case TARGET_RLIMIT_STACK:
1055          return RLIMIT_STACK;
1056      default:
1057          return code;
1058      }
1059  }
1060  
1061  static inline abi_long copy_from_user_timeval(struct timeval *tv,
1062                                                abi_ulong target_tv_addr)
1063  {
1064      struct target_timeval *target_tv;
1065  
1066      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1067          return -TARGET_EFAULT;
1068      }
1069  
1070      __get_user(tv->tv_sec, &target_tv->tv_sec);
1071      __get_user(tv->tv_usec, &target_tv->tv_usec);
1072  
1073      unlock_user_struct(target_tv, target_tv_addr, 0);
1074  
1075      return 0;
1076  }
1077  
1078  static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1079                                              const struct timeval *tv)
1080  {
1081      struct target_timeval *target_tv;
1082  
1083      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1084          return -TARGET_EFAULT;
1085      }
1086  
1087      __put_user(tv->tv_sec, &target_tv->tv_sec);
1088      __put_user(tv->tv_usec, &target_tv->tv_usec);
1089  
1090      unlock_user_struct(target_tv, target_tv_addr, 1);
1091  
1092      return 0;
1093  }
1094  
1095  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1096  static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1097                                                  abi_ulong target_tv_addr)
1098  {
1099      struct target__kernel_sock_timeval *target_tv;
1100  
1101      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102          return -TARGET_EFAULT;
1103      }
1104  
1105      __get_user(tv->tv_sec, &target_tv->tv_sec);
1106      __get_user(tv->tv_usec, &target_tv->tv_usec);
1107  
1108      unlock_user_struct(target_tv, target_tv_addr, 0);
1109  
1110      return 0;
1111  }
1112  #endif
1113  
1114  static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1115                                                const struct timeval *tv)
1116  {
1117      struct target__kernel_sock_timeval *target_tv;
1118  
1119      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120          return -TARGET_EFAULT;
1121      }
1122  
1123      __put_user(tv->tv_sec, &target_tv->tv_sec);
1124      __put_user(tv->tv_usec, &target_tv->tv_usec);
1125  
1126      unlock_user_struct(target_tv, target_tv_addr, 1);
1127  
1128      return 0;
1129  }
1130  
1131  #if defined(TARGET_NR_futex) || \
1132      defined(TARGET_NR_rt_sigtimedwait) || \
1133      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1134      defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1135      defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1136      defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1137      defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1138      defined(TARGET_NR_timer_settime) || \
1139      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1140  static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1141                                                 abi_ulong target_addr)
1142  {
1143      struct target_timespec *target_ts;
1144  
1145      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1146          return -TARGET_EFAULT;
1147      }
1148      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1149      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1150      unlock_user_struct(target_ts, target_addr, 0);
1151      return 0;
1152  }
1153  #endif
1154  
1155  #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1156      defined(TARGET_NR_timer_settime64) || \
1157      defined(TARGET_NR_mq_timedsend_time64) || \
1158      defined(TARGET_NR_mq_timedreceive_time64) || \
1159      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1160      defined(TARGET_NR_clock_nanosleep_time64) || \
1161      defined(TARGET_NR_rt_sigtimedwait_time64) || \
1162      defined(TARGET_NR_utimensat) || \
1163      defined(TARGET_NR_utimensat_time64) || \
1164      defined(TARGET_NR_semtimedop_time64) || \
1165      defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1166  static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1167                                                   abi_ulong target_addr)
1168  {
1169      struct target__kernel_timespec *target_ts;
1170  
1171      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1172          return -TARGET_EFAULT;
1173      }
1174      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1175      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1176      /* in 32bit mode, this drops the padding */
1177      host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1178      unlock_user_struct(target_ts, target_addr, 0);
1179      return 0;
1180  }
1181  #endif
1182  
1183  static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1184                                                 struct timespec *host_ts)
1185  {
1186      struct target_timespec *target_ts;
1187  
1188      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1189          return -TARGET_EFAULT;
1190      }
1191      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1192      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1193      unlock_user_struct(target_ts, target_addr, 1);
1194      return 0;
1195  }
1196  
1197  static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1198                                                   struct timespec *host_ts)
1199  {
1200      struct target__kernel_timespec *target_ts;
1201  
1202      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1203          return -TARGET_EFAULT;
1204      }
1205      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1206      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207      unlock_user_struct(target_ts, target_addr, 1);
1208      return 0;
1209  }
1210  
1211  #if defined(TARGET_NR_gettimeofday)
1212  static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1213                                               struct timezone *tz)
1214  {
1215      struct target_timezone *target_tz;
1216  
1217      if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1218          return -TARGET_EFAULT;
1219      }
1220  
1221      __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1222      __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1223  
1224      unlock_user_struct(target_tz, target_tz_addr, 1);
1225  
1226      return 0;
1227  }
1228  #endif
1229  
1230  #if defined(TARGET_NR_settimeofday)
1231  static inline abi_long copy_from_user_timezone(struct timezone *tz,
1232                                                 abi_ulong target_tz_addr)
1233  {
1234      struct target_timezone *target_tz;
1235  
1236      if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1237          return -TARGET_EFAULT;
1238      }
1239  
1240      __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1241      __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1242  
1243      unlock_user_struct(target_tz, target_tz_addr, 0);
1244  
1245      return 0;
1246  }
1247  #endif
1248  
1249  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1250  #include <mqueue.h>
1251  
1252  static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1253                                                abi_ulong target_mq_attr_addr)
1254  {
1255      struct target_mq_attr *target_mq_attr;
1256  
1257      if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1258                            target_mq_attr_addr, 1))
1259          return -TARGET_EFAULT;
1260  
1261      __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1262      __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1263      __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1264      __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1265  
1266      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1267  
1268      return 0;
1269  }
1270  
1271  static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1272                                              const struct mq_attr *attr)
1273  {
1274      struct target_mq_attr *target_mq_attr;
1275  
1276      if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1277                            target_mq_attr_addr, 0))
1278          return -TARGET_EFAULT;
1279  
1280      __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1281      __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1282      __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1283      __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1284  
1285      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1286  
1287      return 0;
1288  }
1289  #endif
1290  
1291  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1292  /* do_select() must return target values and target errnos. */
1293  static abi_long do_select(int n,
1294                            abi_ulong rfd_addr, abi_ulong wfd_addr,
1295                            abi_ulong efd_addr, abi_ulong target_tv_addr)
1296  {
1297      fd_set rfds, wfds, efds;
1298      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1299      struct timeval tv;
1300      struct timespec ts, *ts_ptr;
1301      abi_long ret;
1302  
1303      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1304      if (ret) {
1305          return ret;
1306      }
1307      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1308      if (ret) {
1309          return ret;
1310      }
1311      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1312      if (ret) {
1313          return ret;
1314      }
1315  
1316      if (target_tv_addr) {
1317          if (copy_from_user_timeval(&tv, target_tv_addr))
1318              return -TARGET_EFAULT;
1319          ts.tv_sec = tv.tv_sec;
1320          ts.tv_nsec = tv.tv_usec * 1000;
1321          ts_ptr = &ts;
1322      } else {
1323          ts_ptr = NULL;
1324      }
1325  
1326      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1327                                    ts_ptr, NULL));
1328  
1329      if (!is_error(ret)) {
1330          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1331              return -TARGET_EFAULT;
1332          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1333              return -TARGET_EFAULT;
1334          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1335              return -TARGET_EFAULT;
1336  
1337          if (target_tv_addr) {
1338              tv.tv_sec = ts.tv_sec;
1339              tv.tv_usec = ts.tv_nsec / 1000;
1340              if (copy_to_user_timeval(target_tv_addr, &tv)) {
1341                  return -TARGET_EFAULT;
1342              }
1343          }
1344      }
1345  
1346      return ret;
1347  }
1348  
1349  #if defined(TARGET_WANT_OLD_SYS_SELECT)
1350  static abi_long do_old_select(abi_ulong arg1)
1351  {
1352      struct target_sel_arg_struct *sel;
1353      abi_ulong inp, outp, exp, tvp;
1354      long nsel;
1355  
1356      if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1357          return -TARGET_EFAULT;
1358      }
1359  
1360      nsel = tswapal(sel->n);
1361      inp = tswapal(sel->inp);
1362      outp = tswapal(sel->outp);
1363      exp = tswapal(sel->exp);
1364      tvp = tswapal(sel->tvp);
1365  
1366      unlock_user_struct(sel, arg1, 0);
1367  
1368      return do_select(nsel, inp, outp, exp, tvp);
1369  }
1370  #endif
1371  #endif
1372  
1373  #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1374  static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1375                              abi_long arg4, abi_long arg5, abi_long arg6,
1376                              bool time64)
1377  {
1378      abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1379      fd_set rfds, wfds, efds;
1380      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1381      struct timespec ts, *ts_ptr;
1382      abi_long ret;
1383  
1384      /*
1385       * The 6th arg is actually two args smashed together,
1386       * so we cannot use the C library.
1387       */
1388      struct {
1389          sigset_t *set;
1390          size_t size;
1391      } sig, *sig_ptr;
1392  
1393      abi_ulong arg_sigset, arg_sigsize, *arg7;
1394  
1395      n = arg1;
1396      rfd_addr = arg2;
1397      wfd_addr = arg3;
1398      efd_addr = arg4;
1399      ts_addr = arg5;
1400  
1401      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1402      if (ret) {
1403          return ret;
1404      }
1405      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1406      if (ret) {
1407          return ret;
1408      }
1409      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1410      if (ret) {
1411          return ret;
1412      }
1413  
1414      /*
1415       * This takes a timespec, and not a timeval, so we cannot
1416       * use the do_select() helper ...
1417       */
1418      if (ts_addr) {
1419          if (time64) {
1420              if (target_to_host_timespec64(&ts, ts_addr)) {
1421                  return -TARGET_EFAULT;
1422              }
1423          } else {
1424              if (target_to_host_timespec(&ts, ts_addr)) {
1425                  return -TARGET_EFAULT;
1426              }
1427          }
1428              ts_ptr = &ts;
1429      } else {
1430          ts_ptr = NULL;
1431      }
1432  
1433      /* Extract the two packed args for the sigset */
1434      sig_ptr = NULL;
1435      if (arg6) {
1436          arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1437          if (!arg7) {
1438              return -TARGET_EFAULT;
1439          }
1440          arg_sigset = tswapal(arg7[0]);
1441          arg_sigsize = tswapal(arg7[1]);
1442          unlock_user(arg7, arg6, 0);
1443  
1444          if (arg_sigset) {
1445              ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1446              if (ret != 0) {
1447                  return ret;
1448              }
1449              sig_ptr = &sig;
1450              sig.size = SIGSET_T_SIZE;
1451          }
1452      }
1453  
1454      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1455                                    ts_ptr, sig_ptr));
1456  
1457      if (sig_ptr) {
1458          finish_sigsuspend_mask(ret);
1459      }
1460  
1461      if (!is_error(ret)) {
1462          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1463              return -TARGET_EFAULT;
1464          }
1465          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1466              return -TARGET_EFAULT;
1467          }
1468          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1469              return -TARGET_EFAULT;
1470          }
1471          if (time64) {
1472              if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1473                  return -TARGET_EFAULT;
1474              }
1475          } else {
1476              if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1477                  return -TARGET_EFAULT;
1478              }
1479          }
1480      }
1481      return ret;
1482  }
1483  #endif
1484  
1485  #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1486      defined(TARGET_NR_ppoll_time64)
1487  static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1488                           abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1489  {
1490      struct target_pollfd *target_pfd;
1491      unsigned int nfds = arg2;
1492      struct pollfd *pfd;
1493      unsigned int i;
1494      abi_long ret;
1495  
1496      pfd = NULL;
1497      target_pfd = NULL;
1498      if (nfds) {
1499          if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1500              return -TARGET_EINVAL;
1501          }
1502          target_pfd = lock_user(VERIFY_WRITE, arg1,
1503                                 sizeof(struct target_pollfd) * nfds, 1);
1504          if (!target_pfd) {
1505              return -TARGET_EFAULT;
1506          }
1507  
1508          pfd = alloca(sizeof(struct pollfd) * nfds);
1509          for (i = 0; i < nfds; i++) {
1510              pfd[i].fd = tswap32(target_pfd[i].fd);
1511              pfd[i].events = tswap16(target_pfd[i].events);
1512          }
1513      }
1514      if (ppoll) {
1515          struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1516          sigset_t *set = NULL;
1517  
1518          if (arg3) {
1519              if (time64) {
1520                  if (target_to_host_timespec64(timeout_ts, arg3)) {
1521                      unlock_user(target_pfd, arg1, 0);
1522                      return -TARGET_EFAULT;
1523                  }
1524              } else {
1525                  if (target_to_host_timespec(timeout_ts, arg3)) {
1526                      unlock_user(target_pfd, arg1, 0);
1527                      return -TARGET_EFAULT;
1528                  }
1529              }
1530          } else {
1531              timeout_ts = NULL;
1532          }
1533  
1534          if (arg4) {
1535              ret = process_sigsuspend_mask(&set, arg4, arg5);
1536              if (ret != 0) {
1537                  unlock_user(target_pfd, arg1, 0);
1538                  return ret;
1539              }
1540          }
1541  
1542          ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1543                                     set, SIGSET_T_SIZE));
1544  
1545          if (set) {
1546              finish_sigsuspend_mask(ret);
1547          }
1548          if (!is_error(ret) && arg3) {
1549              if (time64) {
1550                  if (host_to_target_timespec64(arg3, timeout_ts)) {
1551                      return -TARGET_EFAULT;
1552                  }
1553              } else {
1554                  if (host_to_target_timespec(arg3, timeout_ts)) {
1555                      return -TARGET_EFAULT;
1556                  }
1557              }
1558          }
1559      } else {
1560            struct timespec ts, *pts;
1561  
1562            if (arg3 >= 0) {
1563                /* Convert ms to secs, ns */
1564                ts.tv_sec = arg3 / 1000;
1565                ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1566                pts = &ts;
1567            } else {
1568                /* -ve poll() timeout means "infinite" */
1569                pts = NULL;
1570            }
1571            ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1572      }
1573  
1574      if (!is_error(ret)) {
1575          for (i = 0; i < nfds; i++) {
1576              target_pfd[i].revents = tswap16(pfd[i].revents);
1577          }
1578      }
1579      unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1580      return ret;
1581  }
1582  #endif
1583  
1584  static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1585                          int flags, int is_pipe2)
1586  {
1587      int host_pipe[2];
1588      abi_long ret;
1589      ret = pipe2(host_pipe, flags);
1590  
1591      if (is_error(ret))
1592          return get_errno(ret);
1593  
1594      /* Several targets have special calling conventions for the original
1595         pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1596      if (!is_pipe2) {
1597  #if defined(TARGET_ALPHA)
1598          cpu_env->ir[IR_A4] = host_pipe[1];
1599          return host_pipe[0];
1600  #elif defined(TARGET_MIPS)
1601          cpu_env->active_tc.gpr[3] = host_pipe[1];
1602          return host_pipe[0];
1603  #elif defined(TARGET_SH4)
1604          cpu_env->gregs[1] = host_pipe[1];
1605          return host_pipe[0];
1606  #elif defined(TARGET_SPARC)
1607          cpu_env->regwptr[1] = host_pipe[1];
1608          return host_pipe[0];
1609  #endif
1610      }
1611  
1612      if (put_user_s32(host_pipe[0], pipedes)
1613          || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1614          return -TARGET_EFAULT;
1615      return get_errno(ret);
1616  }
1617  
1618  static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1619                                                abi_ulong target_addr,
1620                                                socklen_t len)
1621  {
1622      struct target_ip_mreqn *target_smreqn;
1623  
1624      target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1625      if (!target_smreqn)
1626          return -TARGET_EFAULT;
1627      mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1628      mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1629      if (len == sizeof(struct target_ip_mreqn))
1630          mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1631      unlock_user(target_smreqn, target_addr, 0);
1632  
1633      return 0;
1634  }
1635  
1636  static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1637                                                 abi_ulong target_addr,
1638                                                 socklen_t len)
1639  {
1640      const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1641      sa_family_t sa_family;
1642      struct target_sockaddr *target_saddr;
1643  
1644      if (fd_trans_target_to_host_addr(fd)) {
1645          return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1646      }
1647  
1648      target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1649      if (!target_saddr)
1650          return -TARGET_EFAULT;
1651  
1652      sa_family = tswap16(target_saddr->sa_family);
1653  
1654      /* Oops. The caller might send a incomplete sun_path; sun_path
1655       * must be terminated by \0 (see the manual page), but
1656       * unfortunately it is quite common to specify sockaddr_un
1657       * length as "strlen(x->sun_path)" while it should be
1658       * "strlen(...) + 1". We'll fix that here if needed.
1659       * Linux kernel has a similar feature.
1660       */
1661  
1662      if (sa_family == AF_UNIX) {
1663          if (len < unix_maxlen && len > 0) {
1664              char *cp = (char*)target_saddr;
1665  
1666              if ( cp[len-1] && !cp[len] )
1667                  len++;
1668          }
1669          if (len > unix_maxlen)
1670              len = unix_maxlen;
1671      }
1672  
1673      memcpy(addr, target_saddr, len);
1674      addr->sa_family = sa_family;
1675      if (sa_family == AF_NETLINK) {
1676          struct sockaddr_nl *nladdr;
1677  
1678          nladdr = (struct sockaddr_nl *)addr;
1679          nladdr->nl_pid = tswap32(nladdr->nl_pid);
1680          nladdr->nl_groups = tswap32(nladdr->nl_groups);
1681      } else if (sa_family == AF_PACKET) {
1682  	struct target_sockaddr_ll *lladdr;
1683  
1684  	lladdr = (struct target_sockaddr_ll *)addr;
1685  	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1686  	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1687      } else if (sa_family == AF_INET6) {
1688          struct sockaddr_in6 *in6addr;
1689  
1690          in6addr = (struct sockaddr_in6 *)addr;
1691          in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1692      }
1693      unlock_user(target_saddr, target_addr, 0);
1694  
1695      return 0;
1696  }
1697  
1698  static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1699                                                 struct sockaddr *addr,
1700                                                 socklen_t len)
1701  {
1702      struct target_sockaddr *target_saddr;
1703  
1704      if (len == 0) {
1705          return 0;
1706      }
1707      assert(addr);
1708  
1709      target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1710      if (!target_saddr)
1711          return -TARGET_EFAULT;
1712      memcpy(target_saddr, addr, len);
1713      if (len >= offsetof(struct target_sockaddr, sa_family) +
1714          sizeof(target_saddr->sa_family)) {
1715          target_saddr->sa_family = tswap16(addr->sa_family);
1716      }
1717      if (addr->sa_family == AF_NETLINK &&
1718          len >= sizeof(struct target_sockaddr_nl)) {
1719          struct target_sockaddr_nl *target_nl =
1720                 (struct target_sockaddr_nl *)target_saddr;
1721          target_nl->nl_pid = tswap32(target_nl->nl_pid);
1722          target_nl->nl_groups = tswap32(target_nl->nl_groups);
1723      } else if (addr->sa_family == AF_PACKET) {
1724          struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1725          target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1726          target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1727      } else if (addr->sa_family == AF_INET6 &&
1728                 len >= sizeof(struct target_sockaddr_in6)) {
1729          struct target_sockaddr_in6 *target_in6 =
1730                 (struct target_sockaddr_in6 *)target_saddr;
1731          target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1732      }
1733      unlock_user(target_saddr, target_addr, len);
1734  
1735      return 0;
1736  }
1737  
1738  static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1739                                             struct target_msghdr *target_msgh)
1740  {
1741      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1742      abi_long msg_controllen;
1743      abi_ulong target_cmsg_addr;
1744      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1745      socklen_t space = 0;
1746  
1747      msg_controllen = tswapal(target_msgh->msg_controllen);
1748      if (msg_controllen < sizeof (struct target_cmsghdr))
1749          goto the_end;
1750      target_cmsg_addr = tswapal(target_msgh->msg_control);
1751      target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1752      target_cmsg_start = target_cmsg;
1753      if (!target_cmsg)
1754          return -TARGET_EFAULT;
1755  
1756      while (cmsg && target_cmsg) {
1757          void *data = CMSG_DATA(cmsg);
1758          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1759  
1760          int len = tswapal(target_cmsg->cmsg_len)
1761              - sizeof(struct target_cmsghdr);
1762  
1763          space += CMSG_SPACE(len);
1764          if (space > msgh->msg_controllen) {
1765              space -= CMSG_SPACE(len);
1766              /* This is a QEMU bug, since we allocated the payload
1767               * area ourselves (unlike overflow in host-to-target
1768               * conversion, which is just the guest giving us a buffer
1769               * that's too small). It can't happen for the payload types
1770               * we currently support; if it becomes an issue in future
1771               * we would need to improve our allocation strategy to
1772               * something more intelligent than "twice the size of the
1773               * target buffer we're reading from".
1774               */
1775              qemu_log_mask(LOG_UNIMP,
1776                            ("Unsupported ancillary data %d/%d: "
1777                             "unhandled msg size\n"),
1778                            tswap32(target_cmsg->cmsg_level),
1779                            tswap32(target_cmsg->cmsg_type));
1780              break;
1781          }
1782  
1783          if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1784              cmsg->cmsg_level = SOL_SOCKET;
1785          } else {
1786              cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1787          }
1788          cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1789          cmsg->cmsg_len = CMSG_LEN(len);
1790  
1791          if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1792              int *fd = (int *)data;
1793              int *target_fd = (int *)target_data;
1794              int i, numfds = len / sizeof(int);
1795  
1796              for (i = 0; i < numfds; i++) {
1797                  __get_user(fd[i], target_fd + i);
1798              }
1799          } else if (cmsg->cmsg_level == SOL_SOCKET
1800                 &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1801              struct ucred *cred = (struct ucred *)data;
1802              struct target_ucred *target_cred =
1803                  (struct target_ucred *)target_data;
1804  
1805              __get_user(cred->pid, &target_cred->pid);
1806              __get_user(cred->uid, &target_cred->uid);
1807              __get_user(cred->gid, &target_cred->gid);
1808          } else if (cmsg->cmsg_level == SOL_ALG) {
1809              uint32_t *dst = (uint32_t *)data;
1810  
1811              memcpy(dst, target_data, len);
1812              /* fix endianness of first 32-bit word */
1813              if (len >= sizeof(uint32_t)) {
1814                  *dst = tswap32(*dst);
1815              }
1816          } else {
1817              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1818                            cmsg->cmsg_level, cmsg->cmsg_type);
1819              memcpy(data, target_data, len);
1820          }
1821  
1822          cmsg = CMSG_NXTHDR(msgh, cmsg);
1823          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1824                                           target_cmsg_start);
1825      }
1826      unlock_user(target_cmsg, target_cmsg_addr, 0);
1827   the_end:
1828      msgh->msg_controllen = space;
1829      return 0;
1830  }
1831  
1832  static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1833                                             struct msghdr *msgh)
1834  {
1835      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1836      abi_long msg_controllen;
1837      abi_ulong target_cmsg_addr;
1838      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1839      socklen_t space = 0;
1840  
1841      msg_controllen = tswapal(target_msgh->msg_controllen);
1842      if (msg_controllen < sizeof (struct target_cmsghdr))
1843          goto the_end;
1844      target_cmsg_addr = tswapal(target_msgh->msg_control);
1845      target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1846      target_cmsg_start = target_cmsg;
1847      if (!target_cmsg)
1848          return -TARGET_EFAULT;
1849  
1850      while (cmsg && target_cmsg) {
1851          void *data = CMSG_DATA(cmsg);
1852          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1853  
1854          int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1855          int tgt_len, tgt_space;
1856  
1857          /* We never copy a half-header but may copy half-data;
1858           * this is Linux's behaviour in put_cmsg(). Note that
1859           * truncation here is a guest problem (which we report
1860           * to the guest via the CTRUNC bit), unlike truncation
1861           * in target_to_host_cmsg, which is a QEMU bug.
1862           */
1863          if (msg_controllen < sizeof(struct target_cmsghdr)) {
1864              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1865              break;
1866          }
1867  
1868          if (cmsg->cmsg_level == SOL_SOCKET) {
1869              target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1870          } else {
1871              target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1872          }
1873          target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1874  
1875          /* Payload types which need a different size of payload on
1876           * the target must adjust tgt_len here.
1877           */
1878          tgt_len = len;
1879          switch (cmsg->cmsg_level) {
1880          case SOL_SOCKET:
1881              switch (cmsg->cmsg_type) {
1882              case SO_TIMESTAMP:
1883                  tgt_len = sizeof(struct target_timeval);
1884                  break;
1885              default:
1886                  break;
1887              }
1888              break;
1889          default:
1890              break;
1891          }
1892  
1893          if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1894              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1895              tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1896          }
1897  
1898          /* We must now copy-and-convert len bytes of payload
1899           * into tgt_len bytes of destination space. Bear in mind
1900           * that in both source and destination we may be dealing
1901           * with a truncated value!
1902           */
1903          switch (cmsg->cmsg_level) {
1904          case SOL_SOCKET:
1905              switch (cmsg->cmsg_type) {
1906              case SCM_RIGHTS:
1907              {
1908                  int *fd = (int *)data;
1909                  int *target_fd = (int *)target_data;
1910                  int i, numfds = tgt_len / sizeof(int);
1911  
1912                  for (i = 0; i < numfds; i++) {
1913                      __put_user(fd[i], target_fd + i);
1914                  }
1915                  break;
1916              }
1917              case SO_TIMESTAMP:
1918              {
1919                  struct timeval *tv = (struct timeval *)data;
1920                  struct target_timeval *target_tv =
1921                      (struct target_timeval *)target_data;
1922  
1923                  if (len != sizeof(struct timeval) ||
1924                      tgt_len != sizeof(struct target_timeval)) {
1925                      goto unimplemented;
1926                  }
1927  
1928                  /* copy struct timeval to target */
1929                  __put_user(tv->tv_sec, &target_tv->tv_sec);
1930                  __put_user(tv->tv_usec, &target_tv->tv_usec);
1931                  break;
1932              }
1933              case SCM_CREDENTIALS:
1934              {
1935                  struct ucred *cred = (struct ucred *)data;
1936                  struct target_ucred *target_cred =
1937                      (struct target_ucred *)target_data;
1938  
1939                  __put_user(cred->pid, &target_cred->pid);
1940                  __put_user(cred->uid, &target_cred->uid);
1941                  __put_user(cred->gid, &target_cred->gid);
1942                  break;
1943              }
1944              default:
1945                  goto unimplemented;
1946              }
1947              break;
1948  
1949          case SOL_IP:
1950              switch (cmsg->cmsg_type) {
1951              case IP_TTL:
1952              {
1953                  uint32_t *v = (uint32_t *)data;
1954                  uint32_t *t_int = (uint32_t *)target_data;
1955  
1956                  if (len != sizeof(uint32_t) ||
1957                      tgt_len != sizeof(uint32_t)) {
1958                      goto unimplemented;
1959                  }
1960                  __put_user(*v, t_int);
1961                  break;
1962              }
1963              case IP_RECVERR:
1964              {
1965                  struct errhdr_t {
1966                     struct sock_extended_err ee;
1967                     struct sockaddr_in offender;
1968                  };
1969                  struct errhdr_t *errh = (struct errhdr_t *)data;
1970                  struct errhdr_t *target_errh =
1971                      (struct errhdr_t *)target_data;
1972  
1973                  if (len != sizeof(struct errhdr_t) ||
1974                      tgt_len != sizeof(struct errhdr_t)) {
1975                      goto unimplemented;
1976                  }
1977                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1978                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1979                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1980                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1981                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1982                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1983                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1984                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
1985                      (void *) &errh->offender, sizeof(errh->offender));
1986                  break;
1987              }
1988              default:
1989                  goto unimplemented;
1990              }
1991              break;
1992  
1993          case SOL_IPV6:
1994              switch (cmsg->cmsg_type) {
1995              case IPV6_HOPLIMIT:
1996              {
1997                  uint32_t *v = (uint32_t *)data;
1998                  uint32_t *t_int = (uint32_t *)target_data;
1999  
2000                  if (len != sizeof(uint32_t) ||
2001                      tgt_len != sizeof(uint32_t)) {
2002                      goto unimplemented;
2003                  }
2004                  __put_user(*v, t_int);
2005                  break;
2006              }
2007              case IPV6_RECVERR:
2008              {
2009                  struct errhdr6_t {
2010                     struct sock_extended_err ee;
2011                     struct sockaddr_in6 offender;
2012                  };
2013                  struct errhdr6_t *errh = (struct errhdr6_t *)data;
2014                  struct errhdr6_t *target_errh =
2015                      (struct errhdr6_t *)target_data;
2016  
2017                  if (len != sizeof(struct errhdr6_t) ||
2018                      tgt_len != sizeof(struct errhdr6_t)) {
2019                      goto unimplemented;
2020                  }
2021                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2022                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2023                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2024                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2025                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2026                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2027                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2028                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
2029                      (void *) &errh->offender, sizeof(errh->offender));
2030                  break;
2031              }
2032              default:
2033                  goto unimplemented;
2034              }
2035              break;
2036  
2037          default:
2038          unimplemented:
2039              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2040                            cmsg->cmsg_level, cmsg->cmsg_type);
2041              memcpy(target_data, data, MIN(len, tgt_len));
2042              if (tgt_len > len) {
2043                  memset(target_data + len, 0, tgt_len - len);
2044              }
2045          }
2046  
2047          target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2048          tgt_space = TARGET_CMSG_SPACE(tgt_len);
2049          if (msg_controllen < tgt_space) {
2050              tgt_space = msg_controllen;
2051          }
2052          msg_controllen -= tgt_space;
2053          space += tgt_space;
2054          cmsg = CMSG_NXTHDR(msgh, cmsg);
2055          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2056                                           target_cmsg_start);
2057      }
2058      unlock_user(target_cmsg, target_cmsg_addr, space);
2059   the_end:
2060      target_msgh->msg_controllen = tswapal(space);
2061      return 0;
2062  }
2063  
2064  /* do_setsockopt() Must return target values and target errnos. */
2065  static abi_long do_setsockopt(int sockfd, int level, int optname,
2066                                abi_ulong optval_addr, socklen_t optlen)
2067  {
2068      abi_long ret;
2069      int val;
2070      struct ip_mreqn *ip_mreq;
2071      struct ip_mreq_source *ip_mreq_source;
2072  
2073      switch(level) {
2074      case SOL_TCP:
2075      case SOL_UDP:
2076          /* TCP and UDP options all take an 'int' value.  */
2077          if (optlen < sizeof(uint32_t))
2078              return -TARGET_EINVAL;
2079  
2080          if (get_user_u32(val, optval_addr))
2081              return -TARGET_EFAULT;
2082          ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2083          break;
2084      case SOL_IP:
2085          switch(optname) {
2086          case IP_TOS:
2087          case IP_TTL:
2088          case IP_HDRINCL:
2089          case IP_ROUTER_ALERT:
2090          case IP_RECVOPTS:
2091          case IP_RETOPTS:
2092          case IP_PKTINFO:
2093          case IP_MTU_DISCOVER:
2094          case IP_RECVERR:
2095          case IP_RECVTTL:
2096          case IP_RECVTOS:
2097  #ifdef IP_FREEBIND
2098          case IP_FREEBIND:
2099  #endif
2100          case IP_MULTICAST_TTL:
2101          case IP_MULTICAST_LOOP:
2102              val = 0;
2103              if (optlen >= sizeof(uint32_t)) {
2104                  if (get_user_u32(val, optval_addr))
2105                      return -TARGET_EFAULT;
2106              } else if (optlen >= 1) {
2107                  if (get_user_u8(val, optval_addr))
2108                      return -TARGET_EFAULT;
2109              }
2110              ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2111              break;
2112          case IP_ADD_MEMBERSHIP:
2113          case IP_DROP_MEMBERSHIP:
2114              if (optlen < sizeof (struct target_ip_mreq) ||
2115                  optlen > sizeof (struct target_ip_mreqn))
2116                  return -TARGET_EINVAL;
2117  
2118              ip_mreq = (struct ip_mreqn *) alloca(optlen);
2119              target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2120              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2121              break;
2122  
2123          case IP_BLOCK_SOURCE:
2124          case IP_UNBLOCK_SOURCE:
2125          case IP_ADD_SOURCE_MEMBERSHIP:
2126          case IP_DROP_SOURCE_MEMBERSHIP:
2127              if (optlen != sizeof (struct target_ip_mreq_source))
2128                  return -TARGET_EINVAL;
2129  
2130              ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2131              if (!ip_mreq_source) {
2132                  return -TARGET_EFAULT;
2133              }
2134              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2135              unlock_user (ip_mreq_source, optval_addr, 0);
2136              break;
2137  
2138          default:
2139              goto unimplemented;
2140          }
2141          break;
2142      case SOL_IPV6:
2143          switch (optname) {
2144          case IPV6_MTU_DISCOVER:
2145          case IPV6_MTU:
2146          case IPV6_V6ONLY:
2147          case IPV6_RECVPKTINFO:
2148          case IPV6_UNICAST_HOPS:
2149          case IPV6_MULTICAST_HOPS:
2150          case IPV6_MULTICAST_LOOP:
2151          case IPV6_RECVERR:
2152          case IPV6_RECVHOPLIMIT:
2153          case IPV6_2292HOPLIMIT:
2154          case IPV6_CHECKSUM:
2155          case IPV6_ADDRFORM:
2156          case IPV6_2292PKTINFO:
2157          case IPV6_RECVTCLASS:
2158          case IPV6_RECVRTHDR:
2159          case IPV6_2292RTHDR:
2160          case IPV6_RECVHOPOPTS:
2161          case IPV6_2292HOPOPTS:
2162          case IPV6_RECVDSTOPTS:
2163          case IPV6_2292DSTOPTS:
2164          case IPV6_TCLASS:
2165          case IPV6_ADDR_PREFERENCES:
2166  #ifdef IPV6_RECVPATHMTU
2167          case IPV6_RECVPATHMTU:
2168  #endif
2169  #ifdef IPV6_TRANSPARENT
2170          case IPV6_TRANSPARENT:
2171  #endif
2172  #ifdef IPV6_FREEBIND
2173          case IPV6_FREEBIND:
2174  #endif
2175  #ifdef IPV6_RECVORIGDSTADDR
2176          case IPV6_RECVORIGDSTADDR:
2177  #endif
2178              val = 0;
2179              if (optlen < sizeof(uint32_t)) {
2180                  return -TARGET_EINVAL;
2181              }
2182              if (get_user_u32(val, optval_addr)) {
2183                  return -TARGET_EFAULT;
2184              }
2185              ret = get_errno(setsockopt(sockfd, level, optname,
2186                                         &val, sizeof(val)));
2187              break;
2188          case IPV6_PKTINFO:
2189          {
2190              struct in6_pktinfo pki;
2191  
2192              if (optlen < sizeof(pki)) {
2193                  return -TARGET_EINVAL;
2194              }
2195  
2196              if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2197                  return -TARGET_EFAULT;
2198              }
2199  
2200              pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2201  
2202              ret = get_errno(setsockopt(sockfd, level, optname,
2203                                         &pki, sizeof(pki)));
2204              break;
2205          }
2206          case IPV6_ADD_MEMBERSHIP:
2207          case IPV6_DROP_MEMBERSHIP:
2208          {
2209              struct ipv6_mreq ipv6mreq;
2210  
2211              if (optlen < sizeof(ipv6mreq)) {
2212                  return -TARGET_EINVAL;
2213              }
2214  
2215              if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2216                  return -TARGET_EFAULT;
2217              }
2218  
2219              ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2220  
2221              ret = get_errno(setsockopt(sockfd, level, optname,
2222                                         &ipv6mreq, sizeof(ipv6mreq)));
2223              break;
2224          }
2225          default:
2226              goto unimplemented;
2227          }
2228          break;
2229      case SOL_ICMPV6:
2230          switch (optname) {
2231          case ICMPV6_FILTER:
2232          {
2233              struct icmp6_filter icmp6f;
2234  
2235              if (optlen > sizeof(icmp6f)) {
2236                  optlen = sizeof(icmp6f);
2237              }
2238  
2239              if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2240                  return -TARGET_EFAULT;
2241              }
2242  
2243              for (val = 0; val < 8; val++) {
2244                  icmp6f.data[val] = tswap32(icmp6f.data[val]);
2245              }
2246  
2247              ret = get_errno(setsockopt(sockfd, level, optname,
2248                                         &icmp6f, optlen));
2249              break;
2250          }
2251          default:
2252              goto unimplemented;
2253          }
2254          break;
2255      case SOL_RAW:
2256          switch (optname) {
2257          case ICMP_FILTER:
2258          case IPV6_CHECKSUM:
2259              /* those take an u32 value */
2260              if (optlen < sizeof(uint32_t)) {
2261                  return -TARGET_EINVAL;
2262              }
2263  
2264              if (get_user_u32(val, optval_addr)) {
2265                  return -TARGET_EFAULT;
2266              }
2267              ret = get_errno(setsockopt(sockfd, level, optname,
2268                                         &val, sizeof(val)));
2269              break;
2270  
2271          default:
2272              goto unimplemented;
2273          }
2274          break;
2275  #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2276      case SOL_ALG:
2277          switch (optname) {
2278          case ALG_SET_KEY:
2279          {
2280              char *alg_key = g_malloc(optlen);
2281  
2282              if (!alg_key) {
2283                  return -TARGET_ENOMEM;
2284              }
2285              if (copy_from_user(alg_key, optval_addr, optlen)) {
2286                  g_free(alg_key);
2287                  return -TARGET_EFAULT;
2288              }
2289              ret = get_errno(setsockopt(sockfd, level, optname,
2290                                         alg_key, optlen));
2291              g_free(alg_key);
2292              break;
2293          }
2294          case ALG_SET_AEAD_AUTHSIZE:
2295          {
2296              ret = get_errno(setsockopt(sockfd, level, optname,
2297                                         NULL, optlen));
2298              break;
2299          }
2300          default:
2301              goto unimplemented;
2302          }
2303          break;
2304  #endif
2305      case TARGET_SOL_SOCKET:
2306          switch (optname) {
2307          case TARGET_SO_RCVTIMEO:
2308          {
2309                  struct timeval tv;
2310  
2311                  optname = SO_RCVTIMEO;
2312  
2313  set_timeout:
2314                  if (optlen != sizeof(struct target_timeval)) {
2315                      return -TARGET_EINVAL;
2316                  }
2317  
2318                  if (copy_from_user_timeval(&tv, optval_addr)) {
2319                      return -TARGET_EFAULT;
2320                  }
2321  
2322                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2323                                  &tv, sizeof(tv)));
2324                  return ret;
2325          }
2326          case TARGET_SO_SNDTIMEO:
2327                  optname = SO_SNDTIMEO;
2328                  goto set_timeout;
2329          case TARGET_SO_ATTACH_FILTER:
2330          {
2331                  struct target_sock_fprog *tfprog;
2332                  struct target_sock_filter *tfilter;
2333                  struct sock_fprog fprog;
2334                  struct sock_filter *filter;
2335                  int i;
2336  
2337                  if (optlen != sizeof(*tfprog)) {
2338                      return -TARGET_EINVAL;
2339                  }
2340                  if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2341                      return -TARGET_EFAULT;
2342                  }
2343                  if (!lock_user_struct(VERIFY_READ, tfilter,
2344                                        tswapal(tfprog->filter), 0)) {
2345                      unlock_user_struct(tfprog, optval_addr, 1);
2346                      return -TARGET_EFAULT;
2347                  }
2348  
2349                  fprog.len = tswap16(tfprog->len);
2350                  filter = g_try_new(struct sock_filter, fprog.len);
2351                  if (filter == NULL) {
2352                      unlock_user_struct(tfilter, tfprog->filter, 1);
2353                      unlock_user_struct(tfprog, optval_addr, 1);
2354                      return -TARGET_ENOMEM;
2355                  }
2356                  for (i = 0; i < fprog.len; i++) {
2357                      filter[i].code = tswap16(tfilter[i].code);
2358                      filter[i].jt = tfilter[i].jt;
2359                      filter[i].jf = tfilter[i].jf;
2360                      filter[i].k = tswap32(tfilter[i].k);
2361                  }
2362                  fprog.filter = filter;
2363  
2364                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2365                                  SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2366                  g_free(filter);
2367  
2368                  unlock_user_struct(tfilter, tfprog->filter, 1);
2369                  unlock_user_struct(tfprog, optval_addr, 1);
2370                  return ret;
2371          }
2372  	case TARGET_SO_BINDTODEVICE:
2373  	{
2374  		char *dev_ifname, *addr_ifname;
2375  
2376  		if (optlen > IFNAMSIZ - 1) {
2377  		    optlen = IFNAMSIZ - 1;
2378  		}
2379  		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2380  		if (!dev_ifname) {
2381  		    return -TARGET_EFAULT;
2382  		}
2383  		optname = SO_BINDTODEVICE;
2384  		addr_ifname = alloca(IFNAMSIZ);
2385  		memcpy(addr_ifname, dev_ifname, optlen);
2386  		addr_ifname[optlen] = 0;
2387  		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2388                                             addr_ifname, optlen));
2389  		unlock_user (dev_ifname, optval_addr, 0);
2390  		return ret;
2391  	}
2392          case TARGET_SO_LINGER:
2393          {
2394                  struct linger lg;
2395                  struct target_linger *tlg;
2396  
2397                  if (optlen != sizeof(struct target_linger)) {
2398                      return -TARGET_EINVAL;
2399                  }
2400                  if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2401                      return -TARGET_EFAULT;
2402                  }
2403                  __get_user(lg.l_onoff, &tlg->l_onoff);
2404                  __get_user(lg.l_linger, &tlg->l_linger);
2405                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2406                                  &lg, sizeof(lg)));
2407                  unlock_user_struct(tlg, optval_addr, 0);
2408                  return ret;
2409          }
2410              /* Options with 'int' argument.  */
2411          case TARGET_SO_DEBUG:
2412  		optname = SO_DEBUG;
2413  		break;
2414          case TARGET_SO_REUSEADDR:
2415  		optname = SO_REUSEADDR;
2416  		break;
2417  #ifdef SO_REUSEPORT
2418          case TARGET_SO_REUSEPORT:
2419                  optname = SO_REUSEPORT;
2420                  break;
2421  #endif
2422          case TARGET_SO_TYPE:
2423  		optname = SO_TYPE;
2424  		break;
2425          case TARGET_SO_ERROR:
2426  		optname = SO_ERROR;
2427  		break;
2428          case TARGET_SO_DONTROUTE:
2429  		optname = SO_DONTROUTE;
2430  		break;
2431          case TARGET_SO_BROADCAST:
2432  		optname = SO_BROADCAST;
2433  		break;
2434          case TARGET_SO_SNDBUF:
2435  		optname = SO_SNDBUF;
2436  		break;
2437          case TARGET_SO_SNDBUFFORCE:
2438                  optname = SO_SNDBUFFORCE;
2439                  break;
2440          case TARGET_SO_RCVBUF:
2441  		optname = SO_RCVBUF;
2442  		break;
2443          case TARGET_SO_RCVBUFFORCE:
2444                  optname = SO_RCVBUFFORCE;
2445                  break;
2446          case TARGET_SO_KEEPALIVE:
2447  		optname = SO_KEEPALIVE;
2448  		break;
2449          case TARGET_SO_OOBINLINE:
2450  		optname = SO_OOBINLINE;
2451  		break;
2452          case TARGET_SO_NO_CHECK:
2453  		optname = SO_NO_CHECK;
2454  		break;
2455          case TARGET_SO_PRIORITY:
2456  		optname = SO_PRIORITY;
2457  		break;
2458  #ifdef SO_BSDCOMPAT
2459          case TARGET_SO_BSDCOMPAT:
2460  		optname = SO_BSDCOMPAT;
2461  		break;
2462  #endif
2463          case TARGET_SO_PASSCRED:
2464  		optname = SO_PASSCRED;
2465  		break;
2466          case TARGET_SO_PASSSEC:
2467                  optname = SO_PASSSEC;
2468                  break;
2469          case TARGET_SO_TIMESTAMP:
2470  		optname = SO_TIMESTAMP;
2471  		break;
2472          case TARGET_SO_RCVLOWAT:
2473  		optname = SO_RCVLOWAT;
2474  		break;
2475          default:
2476              goto unimplemented;
2477          }
2478  	if (optlen < sizeof(uint32_t))
2479              return -TARGET_EINVAL;
2480  
2481  	if (get_user_u32(val, optval_addr))
2482              return -TARGET_EFAULT;
2483  	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2484          break;
2485  #ifdef SOL_NETLINK
2486      case SOL_NETLINK:
2487          switch (optname) {
2488          case NETLINK_PKTINFO:
2489          case NETLINK_ADD_MEMBERSHIP:
2490          case NETLINK_DROP_MEMBERSHIP:
2491          case NETLINK_BROADCAST_ERROR:
2492          case NETLINK_NO_ENOBUFS:
2493  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2494          case NETLINK_LISTEN_ALL_NSID:
2495          case NETLINK_CAP_ACK:
2496  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2497  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2498          case NETLINK_EXT_ACK:
2499  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2500  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2501          case NETLINK_GET_STRICT_CHK:
2502  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2503              break;
2504          default:
2505              goto unimplemented;
2506          }
2507          val = 0;
2508          if (optlen < sizeof(uint32_t)) {
2509              return -TARGET_EINVAL;
2510          }
2511          if (get_user_u32(val, optval_addr)) {
2512              return -TARGET_EFAULT;
2513          }
2514          ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2515                                     sizeof(val)));
2516          break;
2517  #endif /* SOL_NETLINK */
2518      default:
2519      unimplemented:
2520          qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2521                        level, optname);
2522          ret = -TARGET_ENOPROTOOPT;
2523      }
2524      return ret;
2525  }
2526  
2527  /* do_getsockopt() Must return target values and target errnos. */
2528  static abi_long do_getsockopt(int sockfd, int level, int optname,
2529                                abi_ulong optval_addr, abi_ulong optlen)
2530  {
2531      abi_long ret;
2532      int len, val;
2533      socklen_t lv;
2534  
2535      switch(level) {
2536      case TARGET_SOL_SOCKET:
2537          level = SOL_SOCKET;
2538          switch (optname) {
2539          /* These don't just return a single integer */
2540          case TARGET_SO_PEERNAME:
2541              goto unimplemented;
2542          case TARGET_SO_RCVTIMEO: {
2543              struct timeval tv;
2544              socklen_t tvlen;
2545  
2546              optname = SO_RCVTIMEO;
2547  
2548  get_timeout:
2549              if (get_user_u32(len, optlen)) {
2550                  return -TARGET_EFAULT;
2551              }
2552              if (len < 0) {
2553                  return -TARGET_EINVAL;
2554              }
2555  
2556              tvlen = sizeof(tv);
2557              ret = get_errno(getsockopt(sockfd, level, optname,
2558                                         &tv, &tvlen));
2559              if (ret < 0) {
2560                  return ret;
2561              }
2562              if (len > sizeof(struct target_timeval)) {
2563                  len = sizeof(struct target_timeval);
2564              }
2565              if (copy_to_user_timeval(optval_addr, &tv)) {
2566                  return -TARGET_EFAULT;
2567              }
2568              if (put_user_u32(len, optlen)) {
2569                  return -TARGET_EFAULT;
2570              }
2571              break;
2572          }
2573          case TARGET_SO_SNDTIMEO:
2574              optname = SO_SNDTIMEO;
2575              goto get_timeout;
2576          case TARGET_SO_PEERCRED: {
2577              struct ucred cr;
2578              socklen_t crlen;
2579              struct target_ucred *tcr;
2580  
2581              if (get_user_u32(len, optlen)) {
2582                  return -TARGET_EFAULT;
2583              }
2584              if (len < 0) {
2585                  return -TARGET_EINVAL;
2586              }
2587  
2588              crlen = sizeof(cr);
2589              ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2590                                         &cr, &crlen));
2591              if (ret < 0) {
2592                  return ret;
2593              }
2594              if (len > crlen) {
2595                  len = crlen;
2596              }
2597              if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2598                  return -TARGET_EFAULT;
2599              }
2600              __put_user(cr.pid, &tcr->pid);
2601              __put_user(cr.uid, &tcr->uid);
2602              __put_user(cr.gid, &tcr->gid);
2603              unlock_user_struct(tcr, optval_addr, 1);
2604              if (put_user_u32(len, optlen)) {
2605                  return -TARGET_EFAULT;
2606              }
2607              break;
2608          }
2609          case TARGET_SO_PEERSEC: {
2610              char *name;
2611  
2612              if (get_user_u32(len, optlen)) {
2613                  return -TARGET_EFAULT;
2614              }
2615              if (len < 0) {
2616                  return -TARGET_EINVAL;
2617              }
2618              name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2619              if (!name) {
2620                  return -TARGET_EFAULT;
2621              }
2622              lv = len;
2623              ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2624                                         name, &lv));
2625              if (put_user_u32(lv, optlen)) {
2626                  ret = -TARGET_EFAULT;
2627              }
2628              unlock_user(name, optval_addr, lv);
2629              break;
2630          }
2631          case TARGET_SO_LINGER:
2632          {
2633              struct linger lg;
2634              socklen_t lglen;
2635              struct target_linger *tlg;
2636  
2637              if (get_user_u32(len, optlen)) {
2638                  return -TARGET_EFAULT;
2639              }
2640              if (len < 0) {
2641                  return -TARGET_EINVAL;
2642              }
2643  
2644              lglen = sizeof(lg);
2645              ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2646                                         &lg, &lglen));
2647              if (ret < 0) {
2648                  return ret;
2649              }
2650              if (len > lglen) {
2651                  len = lglen;
2652              }
2653              if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2654                  return -TARGET_EFAULT;
2655              }
2656              __put_user(lg.l_onoff, &tlg->l_onoff);
2657              __put_user(lg.l_linger, &tlg->l_linger);
2658              unlock_user_struct(tlg, optval_addr, 1);
2659              if (put_user_u32(len, optlen)) {
2660                  return -TARGET_EFAULT;
2661              }
2662              break;
2663          }
2664          /* Options with 'int' argument.  */
2665          case TARGET_SO_DEBUG:
2666              optname = SO_DEBUG;
2667              goto int_case;
2668          case TARGET_SO_REUSEADDR:
2669              optname = SO_REUSEADDR;
2670              goto int_case;
2671  #ifdef SO_REUSEPORT
2672          case TARGET_SO_REUSEPORT:
2673              optname = SO_REUSEPORT;
2674              goto int_case;
2675  #endif
2676          case TARGET_SO_TYPE:
2677              optname = SO_TYPE;
2678              goto int_case;
2679          case TARGET_SO_ERROR:
2680              optname = SO_ERROR;
2681              goto int_case;
2682          case TARGET_SO_DONTROUTE:
2683              optname = SO_DONTROUTE;
2684              goto int_case;
2685          case TARGET_SO_BROADCAST:
2686              optname = SO_BROADCAST;
2687              goto int_case;
2688          case TARGET_SO_SNDBUF:
2689              optname = SO_SNDBUF;
2690              goto int_case;
2691          case TARGET_SO_RCVBUF:
2692              optname = SO_RCVBUF;
2693              goto int_case;
2694          case TARGET_SO_KEEPALIVE:
2695              optname = SO_KEEPALIVE;
2696              goto int_case;
2697          case TARGET_SO_OOBINLINE:
2698              optname = SO_OOBINLINE;
2699              goto int_case;
2700          case TARGET_SO_NO_CHECK:
2701              optname = SO_NO_CHECK;
2702              goto int_case;
2703          case TARGET_SO_PRIORITY:
2704              optname = SO_PRIORITY;
2705              goto int_case;
2706  #ifdef SO_BSDCOMPAT
2707          case TARGET_SO_BSDCOMPAT:
2708              optname = SO_BSDCOMPAT;
2709              goto int_case;
2710  #endif
2711          case TARGET_SO_PASSCRED:
2712              optname = SO_PASSCRED;
2713              goto int_case;
2714          case TARGET_SO_TIMESTAMP:
2715              optname = SO_TIMESTAMP;
2716              goto int_case;
2717          case TARGET_SO_RCVLOWAT:
2718              optname = SO_RCVLOWAT;
2719              goto int_case;
2720          case TARGET_SO_ACCEPTCONN:
2721              optname = SO_ACCEPTCONN;
2722              goto int_case;
2723          case TARGET_SO_PROTOCOL:
2724              optname = SO_PROTOCOL;
2725              goto int_case;
2726          case TARGET_SO_DOMAIN:
2727              optname = SO_DOMAIN;
2728              goto int_case;
2729          default:
2730              goto int_case;
2731          }
2732          break;
2733      case SOL_TCP:
2734      case SOL_UDP:
2735          /* TCP and UDP options all take an 'int' value.  */
2736      int_case:
2737          if (get_user_u32(len, optlen))
2738              return -TARGET_EFAULT;
2739          if (len < 0)
2740              return -TARGET_EINVAL;
2741          lv = sizeof(lv);
2742          ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2743          if (ret < 0)
2744              return ret;
2745          switch (optname) {
2746          case SO_TYPE:
2747              val = host_to_target_sock_type(val);
2748              break;
2749          case SO_ERROR:
2750              val = host_to_target_errno(val);
2751              break;
2752          }
2753          if (len > lv)
2754              len = lv;
2755          if (len == 4) {
2756              if (put_user_u32(val, optval_addr))
2757                  return -TARGET_EFAULT;
2758          } else {
2759              if (put_user_u8(val, optval_addr))
2760                  return -TARGET_EFAULT;
2761          }
2762          if (put_user_u32(len, optlen))
2763              return -TARGET_EFAULT;
2764          break;
2765      case SOL_IP:
2766          switch(optname) {
2767          case IP_TOS:
2768          case IP_TTL:
2769          case IP_HDRINCL:
2770          case IP_ROUTER_ALERT:
2771          case IP_RECVOPTS:
2772          case IP_RETOPTS:
2773          case IP_PKTINFO:
2774          case IP_MTU_DISCOVER:
2775          case IP_RECVERR:
2776          case IP_RECVTOS:
2777  #ifdef IP_FREEBIND
2778          case IP_FREEBIND:
2779  #endif
2780          case IP_MULTICAST_TTL:
2781          case IP_MULTICAST_LOOP:
2782              if (get_user_u32(len, optlen))
2783                  return -TARGET_EFAULT;
2784              if (len < 0)
2785                  return -TARGET_EINVAL;
2786              lv = sizeof(lv);
2787              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2788              if (ret < 0)
2789                  return ret;
2790              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2791                  len = 1;
2792                  if (put_user_u32(len, optlen)
2793                      || put_user_u8(val, optval_addr))
2794                      return -TARGET_EFAULT;
2795              } else {
2796                  if (len > sizeof(int))
2797                      len = sizeof(int);
2798                  if (put_user_u32(len, optlen)
2799                      || put_user_u32(val, optval_addr))
2800                      return -TARGET_EFAULT;
2801              }
2802              break;
2803          default:
2804              ret = -TARGET_ENOPROTOOPT;
2805              break;
2806          }
2807          break;
2808      case SOL_IPV6:
2809          switch (optname) {
2810          case IPV6_MTU_DISCOVER:
2811          case IPV6_MTU:
2812          case IPV6_V6ONLY:
2813          case IPV6_RECVPKTINFO:
2814          case IPV6_UNICAST_HOPS:
2815          case IPV6_MULTICAST_HOPS:
2816          case IPV6_MULTICAST_LOOP:
2817          case IPV6_RECVERR:
2818          case IPV6_RECVHOPLIMIT:
2819          case IPV6_2292HOPLIMIT:
2820          case IPV6_CHECKSUM:
2821          case IPV6_ADDRFORM:
2822          case IPV6_2292PKTINFO:
2823          case IPV6_RECVTCLASS:
2824          case IPV6_RECVRTHDR:
2825          case IPV6_2292RTHDR:
2826          case IPV6_RECVHOPOPTS:
2827          case IPV6_2292HOPOPTS:
2828          case IPV6_RECVDSTOPTS:
2829          case IPV6_2292DSTOPTS:
2830          case IPV6_TCLASS:
2831          case IPV6_ADDR_PREFERENCES:
2832  #ifdef IPV6_RECVPATHMTU
2833          case IPV6_RECVPATHMTU:
2834  #endif
2835  #ifdef IPV6_TRANSPARENT
2836          case IPV6_TRANSPARENT:
2837  #endif
2838  #ifdef IPV6_FREEBIND
2839          case IPV6_FREEBIND:
2840  #endif
2841  #ifdef IPV6_RECVORIGDSTADDR
2842          case IPV6_RECVORIGDSTADDR:
2843  #endif
2844              if (get_user_u32(len, optlen))
2845                  return -TARGET_EFAULT;
2846              if (len < 0)
2847                  return -TARGET_EINVAL;
2848              lv = sizeof(lv);
2849              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2850              if (ret < 0)
2851                  return ret;
2852              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2853                  len = 1;
2854                  if (put_user_u32(len, optlen)
2855                      || put_user_u8(val, optval_addr))
2856                      return -TARGET_EFAULT;
2857              } else {
2858                  if (len > sizeof(int))
2859                      len = sizeof(int);
2860                  if (put_user_u32(len, optlen)
2861                      || put_user_u32(val, optval_addr))
2862                      return -TARGET_EFAULT;
2863              }
2864              break;
2865          default:
2866              ret = -TARGET_ENOPROTOOPT;
2867              break;
2868          }
2869          break;
2870  #ifdef SOL_NETLINK
2871      case SOL_NETLINK:
2872          switch (optname) {
2873          case NETLINK_PKTINFO:
2874          case NETLINK_BROADCAST_ERROR:
2875          case NETLINK_NO_ENOBUFS:
2876  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2877          case NETLINK_LISTEN_ALL_NSID:
2878          case NETLINK_CAP_ACK:
2879  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2880  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2881          case NETLINK_EXT_ACK:
2882  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2883  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2884          case NETLINK_GET_STRICT_CHK:
2885  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2886              if (get_user_u32(len, optlen)) {
2887                  return -TARGET_EFAULT;
2888              }
2889              if (len != sizeof(val)) {
2890                  return -TARGET_EINVAL;
2891              }
2892              lv = len;
2893              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2894              if (ret < 0) {
2895                  return ret;
2896              }
2897              if (put_user_u32(lv, optlen)
2898                  || put_user_u32(val, optval_addr)) {
2899                  return -TARGET_EFAULT;
2900              }
2901              break;
2902  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2903          case NETLINK_LIST_MEMBERSHIPS:
2904          {
2905              uint32_t *results;
2906              int i;
2907              if (get_user_u32(len, optlen)) {
2908                  return -TARGET_EFAULT;
2909              }
2910              if (len < 0) {
2911                  return -TARGET_EINVAL;
2912              }
2913              results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2914              if (!results && len > 0) {
2915                  return -TARGET_EFAULT;
2916              }
2917              lv = len;
2918              ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2919              if (ret < 0) {
2920                  unlock_user(results, optval_addr, 0);
2921                  return ret;
2922              }
2923              /* swap host endianness to target endianness. */
2924              for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2925                  results[i] = tswap32(results[i]);
2926              }
2927              if (put_user_u32(lv, optlen)) {
2928                  return -TARGET_EFAULT;
2929              }
2930              unlock_user(results, optval_addr, 0);
2931              break;
2932          }
2933  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2934          default:
2935              goto unimplemented;
2936          }
2937          break;
2938  #endif /* SOL_NETLINK */
2939      default:
2940      unimplemented:
2941          qemu_log_mask(LOG_UNIMP,
2942                        "getsockopt level=%d optname=%d not yet supported\n",
2943                        level, optname);
2944          ret = -TARGET_EOPNOTSUPP;
2945          break;
2946      }
2947      return ret;
2948  }
2949  
2950  /* Convert target low/high pair representing file offset into the host
2951   * low/high pair. This function doesn't handle offsets bigger than 64 bits
2952   * as the kernel doesn't handle them either.
2953   */
2954  static void target_to_host_low_high(abi_ulong tlow,
2955                                      abi_ulong thigh,
2956                                      unsigned long *hlow,
2957                                      unsigned long *hhigh)
2958  {
2959      uint64_t off = tlow |
2960          ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2961          TARGET_LONG_BITS / 2;
2962  
2963      *hlow = off;
2964      *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2965  }
2966  
2967  static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2968                                  abi_ulong count, int copy)
2969  {
2970      struct target_iovec *target_vec;
2971      struct iovec *vec;
2972      abi_ulong total_len, max_len;
2973      int i;
2974      int err = 0;
2975      bool bad_address = false;
2976  
2977      if (count == 0) {
2978          errno = 0;
2979          return NULL;
2980      }
2981      if (count > IOV_MAX) {
2982          errno = EINVAL;
2983          return NULL;
2984      }
2985  
2986      vec = g_try_new0(struct iovec, count);
2987      if (vec == NULL) {
2988          errno = ENOMEM;
2989          return NULL;
2990      }
2991  
2992      target_vec = lock_user(VERIFY_READ, target_addr,
2993                             count * sizeof(struct target_iovec), 1);
2994      if (target_vec == NULL) {
2995          err = EFAULT;
2996          goto fail2;
2997      }
2998  
2999      /* ??? If host page size > target page size, this will result in a
3000         value larger than what we can actually support.  */
3001      max_len = 0x7fffffff & TARGET_PAGE_MASK;
3002      total_len = 0;
3003  
3004      for (i = 0; i < count; i++) {
3005          abi_ulong base = tswapal(target_vec[i].iov_base);
3006          abi_long len = tswapal(target_vec[i].iov_len);
3007  
3008          if (len < 0) {
3009              err = EINVAL;
3010              goto fail;
3011          } else if (len == 0) {
3012              /* Zero length pointer is ignored.  */
3013              vec[i].iov_base = 0;
3014          } else {
3015              vec[i].iov_base = lock_user(type, base, len, copy);
3016              /* If the first buffer pointer is bad, this is a fault.  But
3017               * subsequent bad buffers will result in a partial write; this
3018               * is realized by filling the vector with null pointers and
3019               * zero lengths. */
3020              if (!vec[i].iov_base) {
3021                  if (i == 0) {
3022                      err = EFAULT;
3023                      goto fail;
3024                  } else {
3025                      bad_address = true;
3026                  }
3027              }
3028              if (bad_address) {
3029                  len = 0;
3030              }
3031              if (len > max_len - total_len) {
3032                  len = max_len - total_len;
3033              }
3034          }
3035          vec[i].iov_len = len;
3036          total_len += len;
3037      }
3038  
3039      unlock_user(target_vec, target_addr, 0);
3040      return vec;
3041  
3042   fail:
3043      while (--i >= 0) {
3044          if (tswapal(target_vec[i].iov_len) > 0) {
3045              unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3046          }
3047      }
3048      unlock_user(target_vec, target_addr, 0);
3049   fail2:
3050      g_free(vec);
3051      errno = err;
3052      return NULL;
3053  }
3054  
3055  static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3056                           abi_ulong count, int copy)
3057  {
3058      struct target_iovec *target_vec;
3059      int i;
3060  
3061      target_vec = lock_user(VERIFY_READ, target_addr,
3062                             count * sizeof(struct target_iovec), 1);
3063      if (target_vec) {
3064          for (i = 0; i < count; i++) {
3065              abi_ulong base = tswapal(target_vec[i].iov_base);
3066              abi_long len = tswapal(target_vec[i].iov_len);
3067              if (len < 0) {
3068                  break;
3069              }
3070              unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3071          }
3072          unlock_user(target_vec, target_addr, 0);
3073      }
3074  
3075      g_free(vec);
3076  }
3077  
3078  static inline int target_to_host_sock_type(int *type)
3079  {
3080      int host_type = 0;
3081      int target_type = *type;
3082  
3083      switch (target_type & TARGET_SOCK_TYPE_MASK) {
3084      case TARGET_SOCK_DGRAM:
3085          host_type = SOCK_DGRAM;
3086          break;
3087      case TARGET_SOCK_STREAM:
3088          host_type = SOCK_STREAM;
3089          break;
3090      default:
3091          host_type = target_type & TARGET_SOCK_TYPE_MASK;
3092          break;
3093      }
3094      if (target_type & TARGET_SOCK_CLOEXEC) {
3095  #if defined(SOCK_CLOEXEC)
3096          host_type |= SOCK_CLOEXEC;
3097  #else
3098          return -TARGET_EINVAL;
3099  #endif
3100      }
3101      if (target_type & TARGET_SOCK_NONBLOCK) {
3102  #if defined(SOCK_NONBLOCK)
3103          host_type |= SOCK_NONBLOCK;
3104  #elif !defined(O_NONBLOCK)
3105          return -TARGET_EINVAL;
3106  #endif
3107      }
3108      *type = host_type;
3109      return 0;
3110  }
3111  
3112  /* Try to emulate socket type flags after socket creation.  */
3113  static int sock_flags_fixup(int fd, int target_type)
3114  {
3115  #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3116      if (target_type & TARGET_SOCK_NONBLOCK) {
3117          int flags = fcntl(fd, F_GETFL);
3118          if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3119              close(fd);
3120              return -TARGET_EINVAL;
3121          }
3122      }
3123  #endif
3124      return fd;
3125  }
3126  
3127  /* do_socket() Must return target values and target errnos. */
3128  static abi_long do_socket(int domain, int type, int protocol)
3129  {
3130      int target_type = type;
3131      int ret;
3132  
3133      ret = target_to_host_sock_type(&type);
3134      if (ret) {
3135          return ret;
3136      }
3137  
3138      if (domain == PF_NETLINK && !(
3139  #ifdef CONFIG_RTNETLINK
3140           protocol == NETLINK_ROUTE ||
3141  #endif
3142           protocol == NETLINK_KOBJECT_UEVENT ||
3143           protocol == NETLINK_AUDIT)) {
3144          return -TARGET_EPROTONOSUPPORT;
3145      }
3146  
3147      if (domain == AF_PACKET ||
3148          (domain == AF_INET && type == SOCK_PACKET)) {
3149          protocol = tswap16(protocol);
3150      }
3151  
3152      ret = get_errno(socket(domain, type, protocol));
3153      if (ret >= 0) {
3154          ret = sock_flags_fixup(ret, target_type);
3155          if (type == SOCK_PACKET) {
3156              /* Manage an obsolete case :
3157               * if socket type is SOCK_PACKET, bind by name
3158               */
3159              fd_trans_register(ret, &target_packet_trans);
3160          } else if (domain == PF_NETLINK) {
3161              switch (protocol) {
3162  #ifdef CONFIG_RTNETLINK
3163              case NETLINK_ROUTE:
3164                  fd_trans_register(ret, &target_netlink_route_trans);
3165                  break;
3166  #endif
3167              case NETLINK_KOBJECT_UEVENT:
3168                  /* nothing to do: messages are strings */
3169                  break;
3170              case NETLINK_AUDIT:
3171                  fd_trans_register(ret, &target_netlink_audit_trans);
3172                  break;
3173              default:
3174                  g_assert_not_reached();
3175              }
3176          }
3177      }
3178      return ret;
3179  }
3180  
3181  /* do_bind() Must return target values and target errnos. */
3182  static abi_long do_bind(int sockfd, abi_ulong target_addr,
3183                          socklen_t addrlen)
3184  {
3185      void *addr;
3186      abi_long ret;
3187  
3188      if ((int)addrlen < 0) {
3189          return -TARGET_EINVAL;
3190      }
3191  
3192      addr = alloca(addrlen+1);
3193  
3194      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3195      if (ret)
3196          return ret;
3197  
3198      return get_errno(bind(sockfd, addr, addrlen));
3199  }
3200  
3201  /* do_connect() Must return target values and target errnos. */
3202  static abi_long do_connect(int sockfd, abi_ulong target_addr,
3203                             socklen_t addrlen)
3204  {
3205      void *addr;
3206      abi_long ret;
3207  
3208      if ((int)addrlen < 0) {
3209          return -TARGET_EINVAL;
3210      }
3211  
3212      addr = alloca(addrlen+1);
3213  
3214      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3215      if (ret)
3216          return ret;
3217  
3218      return get_errno(safe_connect(sockfd, addr, addrlen));
3219  }
3220  
3221  /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3222  static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3223                                        int flags, int send)
3224  {
3225      abi_long ret, len;
3226      struct msghdr msg;
3227      abi_ulong count;
3228      struct iovec *vec;
3229      abi_ulong target_vec;
3230  
3231      if (msgp->msg_name) {
3232          msg.msg_namelen = tswap32(msgp->msg_namelen);
3233          msg.msg_name = alloca(msg.msg_namelen+1);
3234          ret = target_to_host_sockaddr(fd, msg.msg_name,
3235                                        tswapal(msgp->msg_name),
3236                                        msg.msg_namelen);
3237          if (ret == -TARGET_EFAULT) {
3238              /* For connected sockets msg_name and msg_namelen must
3239               * be ignored, so returning EFAULT immediately is wrong.
3240               * Instead, pass a bad msg_name to the host kernel, and
3241               * let it decide whether to return EFAULT or not.
3242               */
3243              msg.msg_name = (void *)-1;
3244          } else if (ret) {
3245              goto out2;
3246          }
3247      } else {
3248          msg.msg_name = NULL;
3249          msg.msg_namelen = 0;
3250      }
3251      msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3252      msg.msg_control = alloca(msg.msg_controllen);
3253      memset(msg.msg_control, 0, msg.msg_controllen);
3254  
3255      msg.msg_flags = tswap32(msgp->msg_flags);
3256  
3257      count = tswapal(msgp->msg_iovlen);
3258      target_vec = tswapal(msgp->msg_iov);
3259  
3260      if (count > IOV_MAX) {
3261          /* sendrcvmsg returns a different errno for this condition than
3262           * readv/writev, so we must catch it here before lock_iovec() does.
3263           */
3264          ret = -TARGET_EMSGSIZE;
3265          goto out2;
3266      }
3267  
3268      vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3269                       target_vec, count, send);
3270      if (vec == NULL) {
3271          ret = -host_to_target_errno(errno);
3272          /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3273          if (!send || ret) {
3274              goto out2;
3275          }
3276      }
3277      msg.msg_iovlen = count;
3278      msg.msg_iov = vec;
3279  
3280      if (send) {
3281          if (fd_trans_target_to_host_data(fd)) {
3282              void *host_msg;
3283  
3284              host_msg = g_malloc(msg.msg_iov->iov_len);
3285              memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3286              ret = fd_trans_target_to_host_data(fd)(host_msg,
3287                                                     msg.msg_iov->iov_len);
3288              if (ret >= 0) {
3289                  msg.msg_iov->iov_base = host_msg;
3290                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3291              }
3292              g_free(host_msg);
3293          } else {
3294              ret = target_to_host_cmsg(&msg, msgp);
3295              if (ret == 0) {
3296                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3297              }
3298          }
3299      } else {
3300          ret = get_errno(safe_recvmsg(fd, &msg, flags));
3301          if (!is_error(ret)) {
3302              len = ret;
3303              if (fd_trans_host_to_target_data(fd)) {
3304                  ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3305                                                 MIN(msg.msg_iov->iov_len, len));
3306              }
3307              if (!is_error(ret)) {
3308                  ret = host_to_target_cmsg(msgp, &msg);
3309              }
3310              if (!is_error(ret)) {
3311                  msgp->msg_namelen = tswap32(msg.msg_namelen);
3312                  msgp->msg_flags = tswap32(msg.msg_flags);
3313                  if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3314                      ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3315                                      msg.msg_name, msg.msg_namelen);
3316                      if (ret) {
3317                          goto out;
3318                      }
3319                  }
3320  
3321                  ret = len;
3322              }
3323          }
3324      }
3325  
3326  out:
3327      if (vec) {
3328          unlock_iovec(vec, target_vec, count, !send);
3329      }
3330  out2:
3331      return ret;
3332  }
3333  
3334  static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3335                                 int flags, int send)
3336  {
3337      abi_long ret;
3338      struct target_msghdr *msgp;
3339  
3340      if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3341                            msgp,
3342                            target_msg,
3343                            send ? 1 : 0)) {
3344          return -TARGET_EFAULT;
3345      }
3346      ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3347      unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3348      return ret;
3349  }
3350  
3351  /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3352   * so it might not have this *mmsg-specific flag either.
3353   */
3354  #ifndef MSG_WAITFORONE
3355  #define MSG_WAITFORONE 0x10000
3356  #endif
3357  
3358  static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3359                                  unsigned int vlen, unsigned int flags,
3360                                  int send)
3361  {
3362      struct target_mmsghdr *mmsgp;
3363      abi_long ret = 0;
3364      int i;
3365  
3366      if (vlen > UIO_MAXIOV) {
3367          vlen = UIO_MAXIOV;
3368      }
3369  
3370      mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3371      if (!mmsgp) {
3372          return -TARGET_EFAULT;
3373      }
3374  
3375      for (i = 0; i < vlen; i++) {
3376          ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3377          if (is_error(ret)) {
3378              break;
3379          }
3380          mmsgp[i].msg_len = tswap32(ret);
3381          /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3382          if (flags & MSG_WAITFORONE) {
3383              flags |= MSG_DONTWAIT;
3384          }
3385      }
3386  
3387      unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3388  
3389      /* Return number of datagrams sent if we sent any at all;
3390       * otherwise return the error.
3391       */
3392      if (i) {
3393          return i;
3394      }
3395      return ret;
3396  }
3397  
3398  /* do_accept4() Must return target values and target errnos. */
3399  static abi_long do_accept4(int fd, abi_ulong target_addr,
3400                             abi_ulong target_addrlen_addr, int flags)
3401  {
3402      socklen_t addrlen, ret_addrlen;
3403      void *addr;
3404      abi_long ret;
3405      int host_flags;
3406  
3407      if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3408          return -TARGET_EINVAL;
3409      }
3410  
3411      host_flags = 0;
3412      if (flags & TARGET_SOCK_NONBLOCK) {
3413          host_flags |= SOCK_NONBLOCK;
3414      }
3415      if (flags & TARGET_SOCK_CLOEXEC) {
3416          host_flags |= SOCK_CLOEXEC;
3417      }
3418  
3419      if (target_addr == 0) {
3420          return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3421      }
3422  
3423      /* linux returns EFAULT if addrlen pointer is invalid */
3424      if (get_user_u32(addrlen, target_addrlen_addr))
3425          return -TARGET_EFAULT;
3426  
3427      if ((int)addrlen < 0) {
3428          return -TARGET_EINVAL;
3429      }
3430  
3431      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3432          return -TARGET_EFAULT;
3433      }
3434  
3435      addr = alloca(addrlen);
3436  
3437      ret_addrlen = addrlen;
3438      ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3439      if (!is_error(ret)) {
3440          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3441          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3442              ret = -TARGET_EFAULT;
3443          }
3444      }
3445      return ret;
3446  }
3447  
3448  /* do_getpeername() Must return target values and target errnos. */
3449  static abi_long do_getpeername(int fd, abi_ulong target_addr,
3450                                 abi_ulong target_addrlen_addr)
3451  {
3452      socklen_t addrlen, ret_addrlen;
3453      void *addr;
3454      abi_long ret;
3455  
3456      if (get_user_u32(addrlen, target_addrlen_addr))
3457          return -TARGET_EFAULT;
3458  
3459      if ((int)addrlen < 0) {
3460          return -TARGET_EINVAL;
3461      }
3462  
3463      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3464          return -TARGET_EFAULT;
3465      }
3466  
3467      addr = alloca(addrlen);
3468  
3469      ret_addrlen = addrlen;
3470      ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3471      if (!is_error(ret)) {
3472          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3473          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3474              ret = -TARGET_EFAULT;
3475          }
3476      }
3477      return ret;
3478  }
3479  
3480  /* do_getsockname() Must return target values and target errnos. */
3481  static abi_long do_getsockname(int fd, abi_ulong target_addr,
3482                                 abi_ulong target_addrlen_addr)
3483  {
3484      socklen_t addrlen, ret_addrlen;
3485      void *addr;
3486      abi_long ret;
3487  
3488      if (get_user_u32(addrlen, target_addrlen_addr))
3489          return -TARGET_EFAULT;
3490  
3491      if ((int)addrlen < 0) {
3492          return -TARGET_EINVAL;
3493      }
3494  
3495      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3496          return -TARGET_EFAULT;
3497      }
3498  
3499      addr = alloca(addrlen);
3500  
3501      ret_addrlen = addrlen;
3502      ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3503      if (!is_error(ret)) {
3504          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3505          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3506              ret = -TARGET_EFAULT;
3507          }
3508      }
3509      return ret;
3510  }
3511  
3512  /* do_socketpair() Must return target values and target errnos. */
3513  static abi_long do_socketpair(int domain, int type, int protocol,
3514                                abi_ulong target_tab_addr)
3515  {
3516      int tab[2];
3517      abi_long ret;
3518  
3519      target_to_host_sock_type(&type);
3520  
3521      ret = get_errno(socketpair(domain, type, protocol, tab));
3522      if (!is_error(ret)) {
3523          if (put_user_s32(tab[0], target_tab_addr)
3524              || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3525              ret = -TARGET_EFAULT;
3526      }
3527      return ret;
3528  }
3529  
3530  /* do_sendto() Must return target values and target errnos. */
3531  static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3532                            abi_ulong target_addr, socklen_t addrlen)
3533  {
3534      void *addr;
3535      void *host_msg;
3536      void *copy_msg = NULL;
3537      abi_long ret;
3538  
3539      if ((int)addrlen < 0) {
3540          return -TARGET_EINVAL;
3541      }
3542  
3543      host_msg = lock_user(VERIFY_READ, msg, len, 1);
3544      if (!host_msg)
3545          return -TARGET_EFAULT;
3546      if (fd_trans_target_to_host_data(fd)) {
3547          copy_msg = host_msg;
3548          host_msg = g_malloc(len);
3549          memcpy(host_msg, copy_msg, len);
3550          ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3551          if (ret < 0) {
3552              goto fail;
3553          }
3554      }
3555      if (target_addr) {
3556          addr = alloca(addrlen+1);
3557          ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3558          if (ret) {
3559              goto fail;
3560          }
3561          ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3562      } else {
3563          ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3564      }
3565  fail:
3566      if (copy_msg) {
3567          g_free(host_msg);
3568          host_msg = copy_msg;
3569      }
3570      unlock_user(host_msg, msg, 0);
3571      return ret;
3572  }
3573  
3574  /* do_recvfrom() Must return target values and target errnos. */
3575  static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3576                              abi_ulong target_addr,
3577                              abi_ulong target_addrlen)
3578  {
3579      socklen_t addrlen, ret_addrlen;
3580      void *addr;
3581      void *host_msg;
3582      abi_long ret;
3583  
3584      if (!msg) {
3585          host_msg = NULL;
3586      } else {
3587          host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3588          if (!host_msg) {
3589              return -TARGET_EFAULT;
3590          }
3591      }
3592      if (target_addr) {
3593          if (get_user_u32(addrlen, target_addrlen)) {
3594              ret = -TARGET_EFAULT;
3595              goto fail;
3596          }
3597          if ((int)addrlen < 0) {
3598              ret = -TARGET_EINVAL;
3599              goto fail;
3600          }
3601          addr = alloca(addrlen);
3602          ret_addrlen = addrlen;
3603          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3604                                        addr, &ret_addrlen));
3605      } else {
3606          addr = NULL; /* To keep compiler quiet.  */
3607          addrlen = 0; /* To keep compiler quiet.  */
3608          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3609      }
3610      if (!is_error(ret)) {
3611          if (fd_trans_host_to_target_data(fd)) {
3612              abi_long trans;
3613              trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3614              if (is_error(trans)) {
3615                  ret = trans;
3616                  goto fail;
3617              }
3618          }
3619          if (target_addr) {
3620              host_to_target_sockaddr(target_addr, addr,
3621                                      MIN(addrlen, ret_addrlen));
3622              if (put_user_u32(ret_addrlen, target_addrlen)) {
3623                  ret = -TARGET_EFAULT;
3624                  goto fail;
3625              }
3626          }
3627          unlock_user(host_msg, msg, len);
3628      } else {
3629  fail:
3630          unlock_user(host_msg, msg, 0);
3631      }
3632      return ret;
3633  }
3634  
3635  #ifdef TARGET_NR_socketcall
3636  /* do_socketcall() must return target values and target errnos. */
3637  static abi_long do_socketcall(int num, abi_ulong vptr)
3638  {
3639      static const unsigned nargs[] = { /* number of arguments per operation */
3640          [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3641          [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3642          [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3643          [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3644          [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3645          [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3646          [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3647          [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3648          [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3649          [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3650          [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3651          [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3652          [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3653          [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3654          [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3655          [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3656          [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3657          [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3658          [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3659          [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3660      };
3661      abi_long a[6]; /* max 6 args */
3662      unsigned i;
3663  
3664      /* check the range of the first argument num */
3665      /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3666      if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3667          return -TARGET_EINVAL;
3668      }
3669      /* ensure we have space for args */
3670      if (nargs[num] > ARRAY_SIZE(a)) {
3671          return -TARGET_EINVAL;
3672      }
3673      /* collect the arguments in a[] according to nargs[] */
3674      for (i = 0; i < nargs[num]; ++i) {
3675          if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3676              return -TARGET_EFAULT;
3677          }
3678      }
3679      /* now when we have the args, invoke the appropriate underlying function */
3680      switch (num) {
3681      case TARGET_SYS_SOCKET: /* domain, type, protocol */
3682          return do_socket(a[0], a[1], a[2]);
3683      case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3684          return do_bind(a[0], a[1], a[2]);
3685      case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3686          return do_connect(a[0], a[1], a[2]);
3687      case TARGET_SYS_LISTEN: /* sockfd, backlog */
3688          return get_errno(listen(a[0], a[1]));
3689      case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3690          return do_accept4(a[0], a[1], a[2], 0);
3691      case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3692          return do_getsockname(a[0], a[1], a[2]);
3693      case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3694          return do_getpeername(a[0], a[1], a[2]);
3695      case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3696          return do_socketpair(a[0], a[1], a[2], a[3]);
3697      case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3698          return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3699      case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3700          return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3701      case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3702          return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3703      case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3704          return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3705      case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3706          return get_errno(shutdown(a[0], a[1]));
3707      case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3708          return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3709      case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3710          return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3711      case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3712          return do_sendrecvmsg(a[0], a[1], a[2], 1);
3713      case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3714          return do_sendrecvmsg(a[0], a[1], a[2], 0);
3715      case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3716          return do_accept4(a[0], a[1], a[2], a[3]);
3717      case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3718          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3719      case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3720          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3721      default:
3722          qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3723          return -TARGET_EINVAL;
3724      }
3725  }
3726  #endif
3727  
3728  #ifndef TARGET_SEMID64_DS
3729  /* asm-generic version of this struct */
3730  struct target_semid64_ds
3731  {
3732    struct target_ipc_perm sem_perm;
3733    abi_ulong sem_otime;
3734  #if TARGET_ABI_BITS == 32
3735    abi_ulong __unused1;
3736  #endif
3737    abi_ulong sem_ctime;
3738  #if TARGET_ABI_BITS == 32
3739    abi_ulong __unused2;
3740  #endif
3741    abi_ulong sem_nsems;
3742    abi_ulong __unused3;
3743    abi_ulong __unused4;
3744  };
3745  #endif
3746  
3747  static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3748                                                 abi_ulong target_addr)
3749  {
3750      struct target_ipc_perm *target_ip;
3751      struct target_semid64_ds *target_sd;
3752  
3753      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3754          return -TARGET_EFAULT;
3755      target_ip = &(target_sd->sem_perm);
3756      host_ip->__key = tswap32(target_ip->__key);
3757      host_ip->uid = tswap32(target_ip->uid);
3758      host_ip->gid = tswap32(target_ip->gid);
3759      host_ip->cuid = tswap32(target_ip->cuid);
3760      host_ip->cgid = tswap32(target_ip->cgid);
3761  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3762      host_ip->mode = tswap32(target_ip->mode);
3763  #else
3764      host_ip->mode = tswap16(target_ip->mode);
3765  #endif
3766  #if defined(TARGET_PPC)
3767      host_ip->__seq = tswap32(target_ip->__seq);
3768  #else
3769      host_ip->__seq = tswap16(target_ip->__seq);
3770  #endif
3771      unlock_user_struct(target_sd, target_addr, 0);
3772      return 0;
3773  }
3774  
3775  static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3776                                                 struct ipc_perm *host_ip)
3777  {
3778      struct target_ipc_perm *target_ip;
3779      struct target_semid64_ds *target_sd;
3780  
3781      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3782          return -TARGET_EFAULT;
3783      target_ip = &(target_sd->sem_perm);
3784      target_ip->__key = tswap32(host_ip->__key);
3785      target_ip->uid = tswap32(host_ip->uid);
3786      target_ip->gid = tswap32(host_ip->gid);
3787      target_ip->cuid = tswap32(host_ip->cuid);
3788      target_ip->cgid = tswap32(host_ip->cgid);
3789  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3790      target_ip->mode = tswap32(host_ip->mode);
3791  #else
3792      target_ip->mode = tswap16(host_ip->mode);
3793  #endif
3794  #if defined(TARGET_PPC)
3795      target_ip->__seq = tswap32(host_ip->__seq);
3796  #else
3797      target_ip->__seq = tswap16(host_ip->__seq);
3798  #endif
3799      unlock_user_struct(target_sd, target_addr, 1);
3800      return 0;
3801  }
3802  
3803  static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3804                                                 abi_ulong target_addr)
3805  {
3806      struct target_semid64_ds *target_sd;
3807  
3808      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3809          return -TARGET_EFAULT;
3810      if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3811          return -TARGET_EFAULT;
3812      host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3813      host_sd->sem_otime = tswapal(target_sd->sem_otime);
3814      host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3815      unlock_user_struct(target_sd, target_addr, 0);
3816      return 0;
3817  }
3818  
3819  static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3820                                                 struct semid_ds *host_sd)
3821  {
3822      struct target_semid64_ds *target_sd;
3823  
3824      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3825          return -TARGET_EFAULT;
3826      if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3827          return -TARGET_EFAULT;
3828      target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3829      target_sd->sem_otime = tswapal(host_sd->sem_otime);
3830      target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3831      unlock_user_struct(target_sd, target_addr, 1);
3832      return 0;
3833  }
3834  
3835  struct target_seminfo {
3836      int semmap;
3837      int semmni;
3838      int semmns;
3839      int semmnu;
3840      int semmsl;
3841      int semopm;
3842      int semume;
3843      int semusz;
3844      int semvmx;
3845      int semaem;
3846  };
3847  
3848  static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3849                                                struct seminfo *host_seminfo)
3850  {
3851      struct target_seminfo *target_seminfo;
3852      if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3853          return -TARGET_EFAULT;
3854      __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3855      __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3856      __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3857      __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3858      __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3859      __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3860      __put_user(host_seminfo->semume, &target_seminfo->semume);
3861      __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3862      __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3863      __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3864      unlock_user_struct(target_seminfo, target_addr, 1);
3865      return 0;
3866  }
3867  
3868  union semun {
3869  	int val;
3870  	struct semid_ds *buf;
3871  	unsigned short *array;
3872  	struct seminfo *__buf;
3873  };
3874  
3875  union target_semun {
3876  	int val;
3877  	abi_ulong buf;
3878  	abi_ulong array;
3879  	abi_ulong __buf;
3880  };
3881  
3882  static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3883                                                 abi_ulong target_addr)
3884  {
3885      int nsems;
3886      unsigned short *array;
3887      union semun semun;
3888      struct semid_ds semid_ds;
3889      int i, ret;
3890  
3891      semun.buf = &semid_ds;
3892  
3893      ret = semctl(semid, 0, IPC_STAT, semun);
3894      if (ret == -1)
3895          return get_errno(ret);
3896  
3897      nsems = semid_ds.sem_nsems;
3898  
3899      *host_array = g_try_new(unsigned short, nsems);
3900      if (!*host_array) {
3901          return -TARGET_ENOMEM;
3902      }
3903      array = lock_user(VERIFY_READ, target_addr,
3904                        nsems*sizeof(unsigned short), 1);
3905      if (!array) {
3906          g_free(*host_array);
3907          return -TARGET_EFAULT;
3908      }
3909  
3910      for(i=0; i<nsems; i++) {
3911          __get_user((*host_array)[i], &array[i]);
3912      }
3913      unlock_user(array, target_addr, 0);
3914  
3915      return 0;
3916  }
3917  
3918  static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3919                                                 unsigned short **host_array)
3920  {
3921      int nsems;
3922      unsigned short *array;
3923      union semun semun;
3924      struct semid_ds semid_ds;
3925      int i, ret;
3926  
3927      semun.buf = &semid_ds;
3928  
3929      ret = semctl(semid, 0, IPC_STAT, semun);
3930      if (ret == -1)
3931          return get_errno(ret);
3932  
3933      nsems = semid_ds.sem_nsems;
3934  
3935      array = lock_user(VERIFY_WRITE, target_addr,
3936                        nsems*sizeof(unsigned short), 0);
3937      if (!array)
3938          return -TARGET_EFAULT;
3939  
3940      for(i=0; i<nsems; i++) {
3941          __put_user((*host_array)[i], &array[i]);
3942      }
3943      g_free(*host_array);
3944      unlock_user(array, target_addr, 1);
3945  
3946      return 0;
3947  }
3948  
3949  static inline abi_long do_semctl(int semid, int semnum, int cmd,
3950                                   abi_ulong target_arg)
3951  {
3952      union target_semun target_su = { .buf = target_arg };
3953      union semun arg;
3954      struct semid_ds dsarg;
3955      unsigned short *array = NULL;
3956      struct seminfo seminfo;
3957      abi_long ret = -TARGET_EINVAL;
3958      abi_long err;
3959      cmd &= 0xff;
3960  
3961      switch( cmd ) {
3962  	case GETVAL:
3963  	case SETVAL:
3964              /* In 64 bit cross-endian situations, we will erroneously pick up
3965               * the wrong half of the union for the "val" element.  To rectify
3966               * this, the entire 8-byte structure is byteswapped, followed by
3967  	     * a swap of the 4 byte val field. In other cases, the data is
3968  	     * already in proper host byte order. */
3969  	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3970  		target_su.buf = tswapal(target_su.buf);
3971  		arg.val = tswap32(target_su.val);
3972  	    } else {
3973  		arg.val = target_su.val;
3974  	    }
3975              ret = get_errno(semctl(semid, semnum, cmd, arg));
3976              break;
3977  	case GETALL:
3978  	case SETALL:
3979              err = target_to_host_semarray(semid, &array, target_su.array);
3980              if (err)
3981                  return err;
3982              arg.array = array;
3983              ret = get_errno(semctl(semid, semnum, cmd, arg));
3984              err = host_to_target_semarray(semid, target_su.array, &array);
3985              if (err)
3986                  return err;
3987              break;
3988  	case IPC_STAT:
3989  	case IPC_SET:
3990  	case SEM_STAT:
3991              err = target_to_host_semid_ds(&dsarg, target_su.buf);
3992              if (err)
3993                  return err;
3994              arg.buf = &dsarg;
3995              ret = get_errno(semctl(semid, semnum, cmd, arg));
3996              err = host_to_target_semid_ds(target_su.buf, &dsarg);
3997              if (err)
3998                  return err;
3999              break;
4000  	case IPC_INFO:
4001  	case SEM_INFO:
4002              arg.__buf = &seminfo;
4003              ret = get_errno(semctl(semid, semnum, cmd, arg));
4004              err = host_to_target_seminfo(target_su.__buf, &seminfo);
4005              if (err)
4006                  return err;
4007              break;
4008  	case IPC_RMID:
4009  	case GETPID:
4010  	case GETNCNT:
4011  	case GETZCNT:
4012              ret = get_errno(semctl(semid, semnum, cmd, NULL));
4013              break;
4014      }
4015  
4016      return ret;
4017  }
4018  
4019  struct target_sembuf {
4020      unsigned short sem_num;
4021      short sem_op;
4022      short sem_flg;
4023  };
4024  
4025  static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4026                                               abi_ulong target_addr,
4027                                               unsigned nsops)
4028  {
4029      struct target_sembuf *target_sembuf;
4030      int i;
4031  
4032      target_sembuf = lock_user(VERIFY_READ, target_addr,
4033                                nsops*sizeof(struct target_sembuf), 1);
4034      if (!target_sembuf)
4035          return -TARGET_EFAULT;
4036  
4037      for(i=0; i<nsops; i++) {
4038          __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4039          __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4040          __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4041      }
4042  
4043      unlock_user(target_sembuf, target_addr, 0);
4044  
4045      return 0;
4046  }
4047  
4048  #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4049      defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4050  
4051  /*
4052   * This macro is required to handle the s390 variants, which passes the
4053   * arguments in a different order than default.
4054   */
4055  #ifdef __s390x__
4056  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4057    (__nsops), (__timeout), (__sops)
4058  #else
4059  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4060    (__nsops), 0, (__sops), (__timeout)
4061  #endif
4062  
4063  static inline abi_long do_semtimedop(int semid,
4064                                       abi_long ptr,
4065                                       unsigned nsops,
4066                                       abi_long timeout, bool time64)
4067  {
4068      struct sembuf *sops;
4069      struct timespec ts, *pts = NULL;
4070      abi_long ret;
4071  
4072      if (timeout) {
4073          pts = &ts;
4074          if (time64) {
4075              if (target_to_host_timespec64(pts, timeout)) {
4076                  return -TARGET_EFAULT;
4077              }
4078          } else {
4079              if (target_to_host_timespec(pts, timeout)) {
4080                  return -TARGET_EFAULT;
4081              }
4082          }
4083      }
4084  
4085      if (nsops > TARGET_SEMOPM) {
4086          return -TARGET_E2BIG;
4087      }
4088  
4089      sops = g_new(struct sembuf, nsops);
4090  
4091      if (target_to_host_sembuf(sops, ptr, nsops)) {
4092          g_free(sops);
4093          return -TARGET_EFAULT;
4094      }
4095  
4096      ret = -TARGET_ENOSYS;
4097  #ifdef __NR_semtimedop
4098      ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4099  #endif
4100  #ifdef __NR_ipc
4101      if (ret == -TARGET_ENOSYS) {
4102          ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4103                                   SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4104      }
4105  #endif
4106      g_free(sops);
4107      return ret;
4108  }
4109  #endif
4110  
4111  struct target_msqid_ds
4112  {
4113      struct target_ipc_perm msg_perm;
4114      abi_ulong msg_stime;
4115  #if TARGET_ABI_BITS == 32
4116      abi_ulong __unused1;
4117  #endif
4118      abi_ulong msg_rtime;
4119  #if TARGET_ABI_BITS == 32
4120      abi_ulong __unused2;
4121  #endif
4122      abi_ulong msg_ctime;
4123  #if TARGET_ABI_BITS == 32
4124      abi_ulong __unused3;
4125  #endif
4126      abi_ulong __msg_cbytes;
4127      abi_ulong msg_qnum;
4128      abi_ulong msg_qbytes;
4129      abi_ulong msg_lspid;
4130      abi_ulong msg_lrpid;
4131      abi_ulong __unused4;
4132      abi_ulong __unused5;
4133  };
4134  
4135  static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4136                                                 abi_ulong target_addr)
4137  {
4138      struct target_msqid_ds *target_md;
4139  
4140      if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4141          return -TARGET_EFAULT;
4142      if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4143          return -TARGET_EFAULT;
4144      host_md->msg_stime = tswapal(target_md->msg_stime);
4145      host_md->msg_rtime = tswapal(target_md->msg_rtime);
4146      host_md->msg_ctime = tswapal(target_md->msg_ctime);
4147      host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4148      host_md->msg_qnum = tswapal(target_md->msg_qnum);
4149      host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4150      host_md->msg_lspid = tswapal(target_md->msg_lspid);
4151      host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4152      unlock_user_struct(target_md, target_addr, 0);
4153      return 0;
4154  }
4155  
4156  static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4157                                                 struct msqid_ds *host_md)
4158  {
4159      struct target_msqid_ds *target_md;
4160  
4161      if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4162          return -TARGET_EFAULT;
4163      if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4164          return -TARGET_EFAULT;
4165      target_md->msg_stime = tswapal(host_md->msg_stime);
4166      target_md->msg_rtime = tswapal(host_md->msg_rtime);
4167      target_md->msg_ctime = tswapal(host_md->msg_ctime);
4168      target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4169      target_md->msg_qnum = tswapal(host_md->msg_qnum);
4170      target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4171      target_md->msg_lspid = tswapal(host_md->msg_lspid);
4172      target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4173      unlock_user_struct(target_md, target_addr, 1);
4174      return 0;
4175  }
4176  
4177  struct target_msginfo {
4178      int msgpool;
4179      int msgmap;
4180      int msgmax;
4181      int msgmnb;
4182      int msgmni;
4183      int msgssz;
4184      int msgtql;
4185      unsigned short int msgseg;
4186  };
4187  
4188  static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4189                                                struct msginfo *host_msginfo)
4190  {
4191      struct target_msginfo *target_msginfo;
4192      if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4193          return -TARGET_EFAULT;
4194      __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4195      __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4196      __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4197      __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4198      __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4199      __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4200      __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4201      __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4202      unlock_user_struct(target_msginfo, target_addr, 1);
4203      return 0;
4204  }
4205  
4206  static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4207  {
4208      struct msqid_ds dsarg;
4209      struct msginfo msginfo;
4210      abi_long ret = -TARGET_EINVAL;
4211  
4212      cmd &= 0xff;
4213  
4214      switch (cmd) {
4215      case IPC_STAT:
4216      case IPC_SET:
4217      case MSG_STAT:
4218          if (target_to_host_msqid_ds(&dsarg,ptr))
4219              return -TARGET_EFAULT;
4220          ret = get_errno(msgctl(msgid, cmd, &dsarg));
4221          if (host_to_target_msqid_ds(ptr,&dsarg))
4222              return -TARGET_EFAULT;
4223          break;
4224      case IPC_RMID:
4225          ret = get_errno(msgctl(msgid, cmd, NULL));
4226          break;
4227      case IPC_INFO:
4228      case MSG_INFO:
4229          ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4230          if (host_to_target_msginfo(ptr, &msginfo))
4231              return -TARGET_EFAULT;
4232          break;
4233      }
4234  
4235      return ret;
4236  }
4237  
4238  struct target_msgbuf {
4239      abi_long mtype;
4240      char	mtext[1];
4241  };
4242  
4243  static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4244                                   ssize_t msgsz, int msgflg)
4245  {
4246      struct target_msgbuf *target_mb;
4247      struct msgbuf *host_mb;
4248      abi_long ret = 0;
4249  
4250      if (msgsz < 0) {
4251          return -TARGET_EINVAL;
4252      }
4253  
4254      if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4255          return -TARGET_EFAULT;
4256      host_mb = g_try_malloc(msgsz + sizeof(long));
4257      if (!host_mb) {
4258          unlock_user_struct(target_mb, msgp, 0);
4259          return -TARGET_ENOMEM;
4260      }
4261      host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4262      memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4263      ret = -TARGET_ENOSYS;
4264  #ifdef __NR_msgsnd
4265      ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4266  #endif
4267  #ifdef __NR_ipc
4268      if (ret == -TARGET_ENOSYS) {
4269  #ifdef __s390x__
4270          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4271                                   host_mb));
4272  #else
4273          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4274                                   host_mb, 0));
4275  #endif
4276      }
4277  #endif
4278      g_free(host_mb);
4279      unlock_user_struct(target_mb, msgp, 0);
4280  
4281      return ret;
4282  }
4283  
4284  #ifdef __NR_ipc
4285  #if defined(__sparc__)
4286  /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4287  #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4288  #elif defined(__s390x__)
4289  /* The s390 sys_ipc variant has only five parameters.  */
4290  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4291      ((long int[]){(long int)__msgp, __msgtyp})
4292  #else
4293  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4294      ((long int[]){(long int)__msgp, __msgtyp}), 0
4295  #endif
4296  #endif
4297  
4298  static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4299                                   ssize_t msgsz, abi_long msgtyp,
4300                                   int msgflg)
4301  {
4302      struct target_msgbuf *target_mb;
4303      char *target_mtext;
4304      struct msgbuf *host_mb;
4305      abi_long ret = 0;
4306  
4307      if (msgsz < 0) {
4308          return -TARGET_EINVAL;
4309      }
4310  
4311      if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4312          return -TARGET_EFAULT;
4313  
4314      host_mb = g_try_malloc(msgsz + sizeof(long));
4315      if (!host_mb) {
4316          ret = -TARGET_ENOMEM;
4317          goto end;
4318      }
4319      ret = -TARGET_ENOSYS;
4320  #ifdef __NR_msgrcv
4321      ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4322  #endif
4323  #ifdef __NR_ipc
4324      if (ret == -TARGET_ENOSYS) {
4325          ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4326                          msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4327      }
4328  #endif
4329  
4330      if (ret > 0) {
4331          abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4332          target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4333          if (!target_mtext) {
4334              ret = -TARGET_EFAULT;
4335              goto end;
4336          }
4337          memcpy(target_mb->mtext, host_mb->mtext, ret);
4338          unlock_user(target_mtext, target_mtext_addr, ret);
4339      }
4340  
4341      target_mb->mtype = tswapal(host_mb->mtype);
4342  
4343  end:
4344      if (target_mb)
4345          unlock_user_struct(target_mb, msgp, 1);
4346      g_free(host_mb);
4347      return ret;
4348  }
4349  
4350  static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4351                                                 abi_ulong target_addr)
4352  {
4353      struct target_shmid_ds *target_sd;
4354  
4355      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4356          return -TARGET_EFAULT;
4357      if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4358          return -TARGET_EFAULT;
4359      __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4360      __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4361      __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4362      __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4363      __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4364      __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4365      __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4366      unlock_user_struct(target_sd, target_addr, 0);
4367      return 0;
4368  }
4369  
4370  static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4371                                                 struct shmid_ds *host_sd)
4372  {
4373      struct target_shmid_ds *target_sd;
4374  
4375      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4376          return -TARGET_EFAULT;
4377      if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4378          return -TARGET_EFAULT;
4379      __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4380      __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4381      __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4382      __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4383      __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4384      __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4385      __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4386      unlock_user_struct(target_sd, target_addr, 1);
4387      return 0;
4388  }
4389  
4390  struct  target_shminfo {
4391      abi_ulong shmmax;
4392      abi_ulong shmmin;
4393      abi_ulong shmmni;
4394      abi_ulong shmseg;
4395      abi_ulong shmall;
4396  };
4397  
4398  static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4399                                                struct shminfo *host_shminfo)
4400  {
4401      struct target_shminfo *target_shminfo;
4402      if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4403          return -TARGET_EFAULT;
4404      __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4405      __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4406      __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4407      __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4408      __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4409      unlock_user_struct(target_shminfo, target_addr, 1);
4410      return 0;
4411  }
4412  
4413  struct target_shm_info {
4414      int used_ids;
4415      abi_ulong shm_tot;
4416      abi_ulong shm_rss;
4417      abi_ulong shm_swp;
4418      abi_ulong swap_attempts;
4419      abi_ulong swap_successes;
4420  };
4421  
4422  static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4423                                                 struct shm_info *host_shm_info)
4424  {
4425      struct target_shm_info *target_shm_info;
4426      if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4427          return -TARGET_EFAULT;
4428      __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4429      __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4430      __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4431      __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4432      __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4433      __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4434      unlock_user_struct(target_shm_info, target_addr, 1);
4435      return 0;
4436  }
4437  
4438  static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4439  {
4440      struct shmid_ds dsarg;
4441      struct shminfo shminfo;
4442      struct shm_info shm_info;
4443      abi_long ret = -TARGET_EINVAL;
4444  
4445      cmd &= 0xff;
4446  
4447      switch(cmd) {
4448      case IPC_STAT:
4449      case IPC_SET:
4450      case SHM_STAT:
4451          if (target_to_host_shmid_ds(&dsarg, buf))
4452              return -TARGET_EFAULT;
4453          ret = get_errno(shmctl(shmid, cmd, &dsarg));
4454          if (host_to_target_shmid_ds(buf, &dsarg))
4455              return -TARGET_EFAULT;
4456          break;
4457      case IPC_INFO:
4458          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4459          if (host_to_target_shminfo(buf, &shminfo))
4460              return -TARGET_EFAULT;
4461          break;
4462      case SHM_INFO:
4463          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4464          if (host_to_target_shm_info(buf, &shm_info))
4465              return -TARGET_EFAULT;
4466          break;
4467      case IPC_RMID:
4468      case SHM_LOCK:
4469      case SHM_UNLOCK:
4470          ret = get_errno(shmctl(shmid, cmd, NULL));
4471          break;
4472      }
4473  
4474      return ret;
4475  }
4476  
4477  #ifdef TARGET_NR_ipc
4478  /* ??? This only works with linear mappings.  */
4479  /* do_ipc() must return target values and target errnos. */
4480  static abi_long do_ipc(CPUArchState *cpu_env,
4481                         unsigned int call, abi_long first,
4482                         abi_long second, abi_long third,
4483                         abi_long ptr, abi_long fifth)
4484  {
4485      int version;
4486      abi_long ret = 0;
4487  
4488      version = call >> 16;
4489      call &= 0xffff;
4490  
4491      switch (call) {
4492      case IPCOP_semop:
4493          ret = do_semtimedop(first, ptr, second, 0, false);
4494          break;
4495      case IPCOP_semtimedop:
4496      /*
4497       * The s390 sys_ipc variant has only five parameters instead of six
4498       * (as for default variant) and the only difference is the handling of
4499       * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4500       * to a struct timespec where the generic variant uses fifth parameter.
4501       */
4502  #if defined(TARGET_S390X)
4503          ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4504  #else
4505          ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4506  #endif
4507          break;
4508  
4509      case IPCOP_semget:
4510          ret = get_errno(semget(first, second, third));
4511          break;
4512  
4513      case IPCOP_semctl: {
4514          /* The semun argument to semctl is passed by value, so dereference the
4515           * ptr argument. */
4516          abi_ulong atptr;
4517          get_user_ual(atptr, ptr);
4518          ret = do_semctl(first, second, third, atptr);
4519          break;
4520      }
4521  
4522      case IPCOP_msgget:
4523          ret = get_errno(msgget(first, second));
4524          break;
4525  
4526      case IPCOP_msgsnd:
4527          ret = do_msgsnd(first, ptr, second, third);
4528          break;
4529  
4530      case IPCOP_msgctl:
4531          ret = do_msgctl(first, second, ptr);
4532          break;
4533  
4534      case IPCOP_msgrcv:
4535          switch (version) {
4536          case 0:
4537              {
4538                  struct target_ipc_kludge {
4539                      abi_long msgp;
4540                      abi_long msgtyp;
4541                  } *tmp;
4542  
4543                  if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4544                      ret = -TARGET_EFAULT;
4545                      break;
4546                  }
4547  
4548                  ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4549  
4550                  unlock_user_struct(tmp, ptr, 0);
4551                  break;
4552              }
4553          default:
4554              ret = do_msgrcv(first, ptr, second, fifth, third);
4555          }
4556          break;
4557  
4558      case IPCOP_shmat:
4559          switch (version) {
4560          default:
4561          {
4562              abi_ulong raddr;
4563              raddr = target_shmat(cpu_env, first, ptr, second);
4564              if (is_error(raddr))
4565                  return get_errno(raddr);
4566              if (put_user_ual(raddr, third))
4567                  return -TARGET_EFAULT;
4568              break;
4569          }
4570          case 1:
4571              ret = -TARGET_EINVAL;
4572              break;
4573          }
4574  	break;
4575      case IPCOP_shmdt:
4576          ret = target_shmdt(ptr);
4577  	break;
4578  
4579      case IPCOP_shmget:
4580  	/* IPC_* flag values are the same on all linux platforms */
4581  	ret = get_errno(shmget(first, second, third));
4582  	break;
4583  
4584  	/* IPC_* and SHM_* command values are the same on all linux platforms */
4585      case IPCOP_shmctl:
4586          ret = do_shmctl(first, second, ptr);
4587          break;
4588      default:
4589          qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4590                        call, version);
4591  	ret = -TARGET_ENOSYS;
4592  	break;
4593      }
4594      return ret;
4595  }
4596  #endif
4597  
4598  /* kernel structure types definitions */
4599  
4600  #define STRUCT(name, ...) STRUCT_ ## name,
4601  #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4602  enum {
4603  #include "syscall_types.h"
4604  STRUCT_MAX
4605  };
4606  #undef STRUCT
4607  #undef STRUCT_SPECIAL
4608  
4609  #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4610  #define STRUCT_SPECIAL(name)
4611  #include "syscall_types.h"
4612  #undef STRUCT
4613  #undef STRUCT_SPECIAL
4614  
4615  #define MAX_STRUCT_SIZE 4096
4616  
4617  #ifdef CONFIG_FIEMAP
4618  /* So fiemap access checks don't overflow on 32 bit systems.
4619   * This is very slightly smaller than the limit imposed by
4620   * the underlying kernel.
4621   */
4622  #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4623                              / sizeof(struct fiemap_extent))
4624  
4625  static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4626                                         int fd, int cmd, abi_long arg)
4627  {
4628      /* The parameter for this ioctl is a struct fiemap followed
4629       * by an array of struct fiemap_extent whose size is set
4630       * in fiemap->fm_extent_count. The array is filled in by the
4631       * ioctl.
4632       */
4633      int target_size_in, target_size_out;
4634      struct fiemap *fm;
4635      const argtype *arg_type = ie->arg_type;
4636      const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4637      void *argptr, *p;
4638      abi_long ret;
4639      int i, extent_size = thunk_type_size(extent_arg_type, 0);
4640      uint32_t outbufsz;
4641      int free_fm = 0;
4642  
4643      assert(arg_type[0] == TYPE_PTR);
4644      assert(ie->access == IOC_RW);
4645      arg_type++;
4646      target_size_in = thunk_type_size(arg_type, 0);
4647      argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4648      if (!argptr) {
4649          return -TARGET_EFAULT;
4650      }
4651      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4652      unlock_user(argptr, arg, 0);
4653      fm = (struct fiemap *)buf_temp;
4654      if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4655          return -TARGET_EINVAL;
4656      }
4657  
4658      outbufsz = sizeof (*fm) +
4659          (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4660  
4661      if (outbufsz > MAX_STRUCT_SIZE) {
4662          /* We can't fit all the extents into the fixed size buffer.
4663           * Allocate one that is large enough and use it instead.
4664           */
4665          fm = g_try_malloc(outbufsz);
4666          if (!fm) {
4667              return -TARGET_ENOMEM;
4668          }
4669          memcpy(fm, buf_temp, sizeof(struct fiemap));
4670          free_fm = 1;
4671      }
4672      ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4673      if (!is_error(ret)) {
4674          target_size_out = target_size_in;
4675          /* An extent_count of 0 means we were only counting the extents
4676           * so there are no structs to copy
4677           */
4678          if (fm->fm_extent_count != 0) {
4679              target_size_out += fm->fm_mapped_extents * extent_size;
4680          }
4681          argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4682          if (!argptr) {
4683              ret = -TARGET_EFAULT;
4684          } else {
4685              /* Convert the struct fiemap */
4686              thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4687              if (fm->fm_extent_count != 0) {
4688                  p = argptr + target_size_in;
4689                  /* ...and then all the struct fiemap_extents */
4690                  for (i = 0; i < fm->fm_mapped_extents; i++) {
4691                      thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4692                                    THUNK_TARGET);
4693                      p += extent_size;
4694                  }
4695              }
4696              unlock_user(argptr, arg, target_size_out);
4697          }
4698      }
4699      if (free_fm) {
4700          g_free(fm);
4701      }
4702      return ret;
4703  }
4704  #endif
4705  
4706  static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4707                                  int fd, int cmd, abi_long arg)
4708  {
4709      const argtype *arg_type = ie->arg_type;
4710      int target_size;
4711      void *argptr;
4712      int ret;
4713      struct ifconf *host_ifconf;
4714      uint32_t outbufsz;
4715      const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4716      const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4717      int target_ifreq_size;
4718      int nb_ifreq;
4719      int free_buf = 0;
4720      int i;
4721      int target_ifc_len;
4722      abi_long target_ifc_buf;
4723      int host_ifc_len;
4724      char *host_ifc_buf;
4725  
4726      assert(arg_type[0] == TYPE_PTR);
4727      assert(ie->access == IOC_RW);
4728  
4729      arg_type++;
4730      target_size = thunk_type_size(arg_type, 0);
4731  
4732      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4733      if (!argptr)
4734          return -TARGET_EFAULT;
4735      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4736      unlock_user(argptr, arg, 0);
4737  
4738      host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4739      target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4740      target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4741  
4742      if (target_ifc_buf != 0) {
4743          target_ifc_len = host_ifconf->ifc_len;
4744          nb_ifreq = target_ifc_len / target_ifreq_size;
4745          host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4746  
4747          outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4748          if (outbufsz > MAX_STRUCT_SIZE) {
4749              /*
4750               * We can't fit all the extents into the fixed size buffer.
4751               * Allocate one that is large enough and use it instead.
4752               */
4753              host_ifconf = g_try_malloc(outbufsz);
4754              if (!host_ifconf) {
4755                  return -TARGET_ENOMEM;
4756              }
4757              memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4758              free_buf = 1;
4759          }
4760          host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4761  
4762          host_ifconf->ifc_len = host_ifc_len;
4763      } else {
4764        host_ifc_buf = NULL;
4765      }
4766      host_ifconf->ifc_buf = host_ifc_buf;
4767  
4768      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4769      if (!is_error(ret)) {
4770  	/* convert host ifc_len to target ifc_len */
4771  
4772          nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4773          target_ifc_len = nb_ifreq * target_ifreq_size;
4774          host_ifconf->ifc_len = target_ifc_len;
4775  
4776  	/* restore target ifc_buf */
4777  
4778          host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4779  
4780  	/* copy struct ifconf to target user */
4781  
4782          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4783          if (!argptr)
4784              return -TARGET_EFAULT;
4785          thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4786          unlock_user(argptr, arg, target_size);
4787  
4788          if (target_ifc_buf != 0) {
4789              /* copy ifreq[] to target user */
4790              argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4791              for (i = 0; i < nb_ifreq ; i++) {
4792                  thunk_convert(argptr + i * target_ifreq_size,
4793                                host_ifc_buf + i * sizeof(struct ifreq),
4794                                ifreq_arg_type, THUNK_TARGET);
4795              }
4796              unlock_user(argptr, target_ifc_buf, target_ifc_len);
4797          }
4798      }
4799  
4800      if (free_buf) {
4801          g_free(host_ifconf);
4802      }
4803  
4804      return ret;
4805  }
4806  
4807  #if defined(CONFIG_USBFS)
4808  #if HOST_LONG_BITS > 64
4809  #error USBDEVFS thunks do not support >64 bit hosts yet.
4810  #endif
4811  struct live_urb {
4812      uint64_t target_urb_adr;
4813      uint64_t target_buf_adr;
4814      char *target_buf_ptr;
4815      struct usbdevfs_urb host_urb;
4816  };
4817  
4818  static GHashTable *usbdevfs_urb_hashtable(void)
4819  {
4820      static GHashTable *urb_hashtable;
4821  
4822      if (!urb_hashtable) {
4823          urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4824      }
4825      return urb_hashtable;
4826  }
4827  
4828  static void urb_hashtable_insert(struct live_urb *urb)
4829  {
4830      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4831      g_hash_table_insert(urb_hashtable, urb, urb);
4832  }
4833  
4834  static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4835  {
4836      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4837      return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4838  }
4839  
4840  static void urb_hashtable_remove(struct live_urb *urb)
4841  {
4842      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4843      g_hash_table_remove(urb_hashtable, urb);
4844  }
4845  
4846  static abi_long
4847  do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4848                            int fd, int cmd, abi_long arg)
4849  {
4850      const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4851      const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4852      struct live_urb *lurb;
4853      void *argptr;
4854      uint64_t hurb;
4855      int target_size;
4856      uintptr_t target_urb_adr;
4857      abi_long ret;
4858  
4859      target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4860  
4861      memset(buf_temp, 0, sizeof(uint64_t));
4862      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4863      if (is_error(ret)) {
4864          return ret;
4865      }
4866  
4867      memcpy(&hurb, buf_temp, sizeof(uint64_t));
4868      lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4869      if (!lurb->target_urb_adr) {
4870          return -TARGET_EFAULT;
4871      }
4872      urb_hashtable_remove(lurb);
4873      unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4874          lurb->host_urb.buffer_length);
4875      lurb->target_buf_ptr = NULL;
4876  
4877      /* restore the guest buffer pointer */
4878      lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4879  
4880      /* update the guest urb struct */
4881      argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4882      if (!argptr) {
4883          g_free(lurb);
4884          return -TARGET_EFAULT;
4885      }
4886      thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4887      unlock_user(argptr, lurb->target_urb_adr, target_size);
4888  
4889      target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4890      /* write back the urb handle */
4891      argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4892      if (!argptr) {
4893          g_free(lurb);
4894          return -TARGET_EFAULT;
4895      }
4896  
4897      /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4898      target_urb_adr = lurb->target_urb_adr;
4899      thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4900      unlock_user(argptr, arg, target_size);
4901  
4902      g_free(lurb);
4903      return ret;
4904  }
4905  
4906  static abi_long
4907  do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4908                               uint8_t *buf_temp __attribute__((unused)),
4909                               int fd, int cmd, abi_long arg)
4910  {
4911      struct live_urb *lurb;
4912  
4913      /* map target address back to host URB with metadata. */
4914      lurb = urb_hashtable_lookup(arg);
4915      if (!lurb) {
4916          return -TARGET_EFAULT;
4917      }
4918      return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4919  }
4920  
4921  static abi_long
4922  do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4923                              int fd, int cmd, abi_long arg)
4924  {
4925      const argtype *arg_type = ie->arg_type;
4926      int target_size;
4927      abi_long ret;
4928      void *argptr;
4929      int rw_dir;
4930      struct live_urb *lurb;
4931  
4932      /*
4933       * each submitted URB needs to map to a unique ID for the
4934       * kernel, and that unique ID needs to be a pointer to
4935       * host memory.  hence, we need to malloc for each URB.
4936       * isochronous transfers have a variable length struct.
4937       */
4938      arg_type++;
4939      target_size = thunk_type_size(arg_type, THUNK_TARGET);
4940  
4941      /* construct host copy of urb and metadata */
4942      lurb = g_try_new0(struct live_urb, 1);
4943      if (!lurb) {
4944          return -TARGET_ENOMEM;
4945      }
4946  
4947      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4948      if (!argptr) {
4949          g_free(lurb);
4950          return -TARGET_EFAULT;
4951      }
4952      thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4953      unlock_user(argptr, arg, 0);
4954  
4955      lurb->target_urb_adr = arg;
4956      lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4957  
4958      /* buffer space used depends on endpoint type so lock the entire buffer */
4959      /* control type urbs should check the buffer contents for true direction */
4960      rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4961      lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4962          lurb->host_urb.buffer_length, 1);
4963      if (lurb->target_buf_ptr == NULL) {
4964          g_free(lurb);
4965          return -TARGET_EFAULT;
4966      }
4967  
4968      /* update buffer pointer in host copy */
4969      lurb->host_urb.buffer = lurb->target_buf_ptr;
4970  
4971      ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4972      if (is_error(ret)) {
4973          unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4974          g_free(lurb);
4975      } else {
4976          urb_hashtable_insert(lurb);
4977      }
4978  
4979      return ret;
4980  }
4981  #endif /* CONFIG_USBFS */
4982  
4983  static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4984                              int cmd, abi_long arg)
4985  {
4986      void *argptr;
4987      struct dm_ioctl *host_dm;
4988      abi_long guest_data;
4989      uint32_t guest_data_size;
4990      int target_size;
4991      const argtype *arg_type = ie->arg_type;
4992      abi_long ret;
4993      void *big_buf = NULL;
4994      char *host_data;
4995  
4996      arg_type++;
4997      target_size = thunk_type_size(arg_type, 0);
4998      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4999      if (!argptr) {
5000          ret = -TARGET_EFAULT;
5001          goto out;
5002      }
5003      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5004      unlock_user(argptr, arg, 0);
5005  
5006      /* buf_temp is too small, so fetch things into a bigger buffer */
5007      big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5008      memcpy(big_buf, buf_temp, target_size);
5009      buf_temp = big_buf;
5010      host_dm = big_buf;
5011  
5012      guest_data = arg + host_dm->data_start;
5013      if ((guest_data - arg) < 0) {
5014          ret = -TARGET_EINVAL;
5015          goto out;
5016      }
5017      guest_data_size = host_dm->data_size - host_dm->data_start;
5018      host_data = (char*)host_dm + host_dm->data_start;
5019  
5020      argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5021      if (!argptr) {
5022          ret = -TARGET_EFAULT;
5023          goto out;
5024      }
5025  
5026      switch (ie->host_cmd) {
5027      case DM_REMOVE_ALL:
5028      case DM_LIST_DEVICES:
5029      case DM_DEV_CREATE:
5030      case DM_DEV_REMOVE:
5031      case DM_DEV_SUSPEND:
5032      case DM_DEV_STATUS:
5033      case DM_DEV_WAIT:
5034      case DM_TABLE_STATUS:
5035      case DM_TABLE_CLEAR:
5036      case DM_TABLE_DEPS:
5037      case DM_LIST_VERSIONS:
5038          /* no input data */
5039          break;
5040      case DM_DEV_RENAME:
5041      case DM_DEV_SET_GEOMETRY:
5042          /* data contains only strings */
5043          memcpy(host_data, argptr, guest_data_size);
5044          break;
5045      case DM_TARGET_MSG:
5046          memcpy(host_data, argptr, guest_data_size);
5047          *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5048          break;
5049      case DM_TABLE_LOAD:
5050      {
5051          void *gspec = argptr;
5052          void *cur_data = host_data;
5053          const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5054          int spec_size = thunk_type_size(dm_arg_type, 0);
5055          int i;
5056  
5057          for (i = 0; i < host_dm->target_count; i++) {
5058              struct dm_target_spec *spec = cur_data;
5059              uint32_t next;
5060              int slen;
5061  
5062              thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5063              slen = strlen((char*)gspec + spec_size) + 1;
5064              next = spec->next;
5065              spec->next = sizeof(*spec) + slen;
5066              strcpy((char*)&spec[1], gspec + spec_size);
5067              gspec += next;
5068              cur_data += spec->next;
5069          }
5070          break;
5071      }
5072      default:
5073          ret = -TARGET_EINVAL;
5074          unlock_user(argptr, guest_data, 0);
5075          goto out;
5076      }
5077      unlock_user(argptr, guest_data, 0);
5078  
5079      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5080      if (!is_error(ret)) {
5081          guest_data = arg + host_dm->data_start;
5082          guest_data_size = host_dm->data_size - host_dm->data_start;
5083          argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5084          switch (ie->host_cmd) {
5085          case DM_REMOVE_ALL:
5086          case DM_DEV_CREATE:
5087          case DM_DEV_REMOVE:
5088          case DM_DEV_RENAME:
5089          case DM_DEV_SUSPEND:
5090          case DM_DEV_STATUS:
5091          case DM_TABLE_LOAD:
5092          case DM_TABLE_CLEAR:
5093          case DM_TARGET_MSG:
5094          case DM_DEV_SET_GEOMETRY:
5095              /* no return data */
5096              break;
5097          case DM_LIST_DEVICES:
5098          {
5099              struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5100              uint32_t remaining_data = guest_data_size;
5101              void *cur_data = argptr;
5102              const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5103              int nl_size = 12; /* can't use thunk_size due to alignment */
5104  
5105              while (1) {
5106                  uint32_t next = nl->next;
5107                  if (next) {
5108                      nl->next = nl_size + (strlen(nl->name) + 1);
5109                  }
5110                  if (remaining_data < nl->next) {
5111                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5112                      break;
5113                  }
5114                  thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5115                  strcpy(cur_data + nl_size, nl->name);
5116                  cur_data += nl->next;
5117                  remaining_data -= nl->next;
5118                  if (!next) {
5119                      break;
5120                  }
5121                  nl = (void*)nl + next;
5122              }
5123              break;
5124          }
5125          case DM_DEV_WAIT:
5126          case DM_TABLE_STATUS:
5127          {
5128              struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5129              void *cur_data = argptr;
5130              const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5131              int spec_size = thunk_type_size(dm_arg_type, 0);
5132              int i;
5133  
5134              for (i = 0; i < host_dm->target_count; i++) {
5135                  uint32_t next = spec->next;
5136                  int slen = strlen((char*)&spec[1]) + 1;
5137                  spec->next = (cur_data - argptr) + spec_size + slen;
5138                  if (guest_data_size < spec->next) {
5139                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5140                      break;
5141                  }
5142                  thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5143                  strcpy(cur_data + spec_size, (char*)&spec[1]);
5144                  cur_data = argptr + spec->next;
5145                  spec = (void*)host_dm + host_dm->data_start + next;
5146              }
5147              break;
5148          }
5149          case DM_TABLE_DEPS:
5150          {
5151              void *hdata = (void*)host_dm + host_dm->data_start;
5152              int count = *(uint32_t*)hdata;
5153              uint64_t *hdev = hdata + 8;
5154              uint64_t *gdev = argptr + 8;
5155              int i;
5156  
5157              *(uint32_t*)argptr = tswap32(count);
5158              for (i = 0; i < count; i++) {
5159                  *gdev = tswap64(*hdev);
5160                  gdev++;
5161                  hdev++;
5162              }
5163              break;
5164          }
5165          case DM_LIST_VERSIONS:
5166          {
5167              struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5168              uint32_t remaining_data = guest_data_size;
5169              void *cur_data = argptr;
5170              const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5171              int vers_size = thunk_type_size(dm_arg_type, 0);
5172  
5173              while (1) {
5174                  uint32_t next = vers->next;
5175                  if (next) {
5176                      vers->next = vers_size + (strlen(vers->name) + 1);
5177                  }
5178                  if (remaining_data < vers->next) {
5179                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5180                      break;
5181                  }
5182                  thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5183                  strcpy(cur_data + vers_size, vers->name);
5184                  cur_data += vers->next;
5185                  remaining_data -= vers->next;
5186                  if (!next) {
5187                      break;
5188                  }
5189                  vers = (void*)vers + next;
5190              }
5191              break;
5192          }
5193          default:
5194              unlock_user(argptr, guest_data, 0);
5195              ret = -TARGET_EINVAL;
5196              goto out;
5197          }
5198          unlock_user(argptr, guest_data, guest_data_size);
5199  
5200          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5201          if (!argptr) {
5202              ret = -TARGET_EFAULT;
5203              goto out;
5204          }
5205          thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5206          unlock_user(argptr, arg, target_size);
5207      }
5208  out:
5209      g_free(big_buf);
5210      return ret;
5211  }
5212  
5213  static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5214                                 int cmd, abi_long arg)
5215  {
5216      void *argptr;
5217      int target_size;
5218      const argtype *arg_type = ie->arg_type;
5219      const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5220      abi_long ret;
5221  
5222      struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5223      struct blkpg_partition host_part;
5224  
5225      /* Read and convert blkpg */
5226      arg_type++;
5227      target_size = thunk_type_size(arg_type, 0);
5228      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5229      if (!argptr) {
5230          ret = -TARGET_EFAULT;
5231          goto out;
5232      }
5233      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5234      unlock_user(argptr, arg, 0);
5235  
5236      switch (host_blkpg->op) {
5237      case BLKPG_ADD_PARTITION:
5238      case BLKPG_DEL_PARTITION:
5239          /* payload is struct blkpg_partition */
5240          break;
5241      default:
5242          /* Unknown opcode */
5243          ret = -TARGET_EINVAL;
5244          goto out;
5245      }
5246  
5247      /* Read and convert blkpg->data */
5248      arg = (abi_long)(uintptr_t)host_blkpg->data;
5249      target_size = thunk_type_size(part_arg_type, 0);
5250      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5251      if (!argptr) {
5252          ret = -TARGET_EFAULT;
5253          goto out;
5254      }
5255      thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5256      unlock_user(argptr, arg, 0);
5257  
5258      /* Swizzle the data pointer to our local copy and call! */
5259      host_blkpg->data = &host_part;
5260      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5261  
5262  out:
5263      return ret;
5264  }
5265  
5266  static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5267                                  int fd, int cmd, abi_long arg)
5268  {
5269      const argtype *arg_type = ie->arg_type;
5270      const StructEntry *se;
5271      const argtype *field_types;
5272      const int *dst_offsets, *src_offsets;
5273      int target_size;
5274      void *argptr;
5275      abi_ulong *target_rt_dev_ptr = NULL;
5276      unsigned long *host_rt_dev_ptr = NULL;
5277      abi_long ret;
5278      int i;
5279  
5280      assert(ie->access == IOC_W);
5281      assert(*arg_type == TYPE_PTR);
5282      arg_type++;
5283      assert(*arg_type == TYPE_STRUCT);
5284      target_size = thunk_type_size(arg_type, 0);
5285      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5286      if (!argptr) {
5287          return -TARGET_EFAULT;
5288      }
5289      arg_type++;
5290      assert(*arg_type == (int)STRUCT_rtentry);
5291      se = struct_entries + *arg_type++;
5292      assert(se->convert[0] == NULL);
5293      /* convert struct here to be able to catch rt_dev string */
5294      field_types = se->field_types;
5295      dst_offsets = se->field_offsets[THUNK_HOST];
5296      src_offsets = se->field_offsets[THUNK_TARGET];
5297      for (i = 0; i < se->nb_fields; i++) {
5298          if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5299              assert(*field_types == TYPE_PTRVOID);
5300              target_rt_dev_ptr = argptr + src_offsets[i];
5301              host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5302              if (*target_rt_dev_ptr != 0) {
5303                  *host_rt_dev_ptr = (unsigned long)lock_user_string(
5304                                                    tswapal(*target_rt_dev_ptr));
5305                  if (!*host_rt_dev_ptr) {
5306                      unlock_user(argptr, arg, 0);
5307                      return -TARGET_EFAULT;
5308                  }
5309              } else {
5310                  *host_rt_dev_ptr = 0;
5311              }
5312              field_types++;
5313              continue;
5314          }
5315          field_types = thunk_convert(buf_temp + dst_offsets[i],
5316                                      argptr + src_offsets[i],
5317                                      field_types, THUNK_HOST);
5318      }
5319      unlock_user(argptr, arg, 0);
5320  
5321      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5322  
5323      assert(host_rt_dev_ptr != NULL);
5324      assert(target_rt_dev_ptr != NULL);
5325      if (*host_rt_dev_ptr != 0) {
5326          unlock_user((void *)*host_rt_dev_ptr,
5327                      *target_rt_dev_ptr, 0);
5328      }
5329      return ret;
5330  }
5331  
5332  static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5333                                       int fd, int cmd, abi_long arg)
5334  {
5335      int sig = target_to_host_signal(arg);
5336      return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5337  }
5338  
5339  static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5340                                      int fd, int cmd, abi_long arg)
5341  {
5342      struct timeval tv;
5343      abi_long ret;
5344  
5345      ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5346      if (is_error(ret)) {
5347          return ret;
5348      }
5349  
5350      if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5351          if (copy_to_user_timeval(arg, &tv)) {
5352              return -TARGET_EFAULT;
5353          }
5354      } else {
5355          if (copy_to_user_timeval64(arg, &tv)) {
5356              return -TARGET_EFAULT;
5357          }
5358      }
5359  
5360      return ret;
5361  }
5362  
5363  static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5364                                        int fd, int cmd, abi_long arg)
5365  {
5366      struct timespec ts;
5367      abi_long ret;
5368  
5369      ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5370      if (is_error(ret)) {
5371          return ret;
5372      }
5373  
5374      if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5375          if (host_to_target_timespec(arg, &ts)) {
5376              return -TARGET_EFAULT;
5377          }
5378      } else{
5379          if (host_to_target_timespec64(arg, &ts)) {
5380              return -TARGET_EFAULT;
5381          }
5382      }
5383  
5384      return ret;
5385  }
5386  
5387  #ifdef TIOCGPTPEER
5388  static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5389                                       int fd, int cmd, abi_long arg)
5390  {
5391      int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5392      return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5393  }
5394  #endif
5395  
5396  #ifdef HAVE_DRM_H
5397  
5398  static void unlock_drm_version(struct drm_version *host_ver,
5399                                 struct target_drm_version *target_ver,
5400                                 bool copy)
5401  {
5402      unlock_user(host_ver->name, target_ver->name,
5403                                  copy ? host_ver->name_len : 0);
5404      unlock_user(host_ver->date, target_ver->date,
5405                                  copy ? host_ver->date_len : 0);
5406      unlock_user(host_ver->desc, target_ver->desc,
5407                                  copy ? host_ver->desc_len : 0);
5408  }
5409  
5410  static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5411                                            struct target_drm_version *target_ver)
5412  {
5413      memset(host_ver, 0, sizeof(*host_ver));
5414  
5415      __get_user(host_ver->name_len, &target_ver->name_len);
5416      if (host_ver->name_len) {
5417          host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5418                                     target_ver->name_len, 0);
5419          if (!host_ver->name) {
5420              return -EFAULT;
5421          }
5422      }
5423  
5424      __get_user(host_ver->date_len, &target_ver->date_len);
5425      if (host_ver->date_len) {
5426          host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5427                                     target_ver->date_len, 0);
5428          if (!host_ver->date) {
5429              goto err;
5430          }
5431      }
5432  
5433      __get_user(host_ver->desc_len, &target_ver->desc_len);
5434      if (host_ver->desc_len) {
5435          host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5436                                     target_ver->desc_len, 0);
5437          if (!host_ver->desc) {
5438              goto err;
5439          }
5440      }
5441  
5442      return 0;
5443  err:
5444      unlock_drm_version(host_ver, target_ver, false);
5445      return -EFAULT;
5446  }
5447  
5448  static inline void host_to_target_drmversion(
5449                                            struct target_drm_version *target_ver,
5450                                            struct drm_version *host_ver)
5451  {
5452      __put_user(host_ver->version_major, &target_ver->version_major);
5453      __put_user(host_ver->version_minor, &target_ver->version_minor);
5454      __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5455      __put_user(host_ver->name_len, &target_ver->name_len);
5456      __put_user(host_ver->date_len, &target_ver->date_len);
5457      __put_user(host_ver->desc_len, &target_ver->desc_len);
5458      unlock_drm_version(host_ver, target_ver, true);
5459  }
5460  
5461  static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5462                               int fd, int cmd, abi_long arg)
5463  {
5464      struct drm_version *ver;
5465      struct target_drm_version *target_ver;
5466      abi_long ret;
5467  
5468      switch (ie->host_cmd) {
5469      case DRM_IOCTL_VERSION:
5470          if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5471              return -TARGET_EFAULT;
5472          }
5473          ver = (struct drm_version *)buf_temp;
5474          ret = target_to_host_drmversion(ver, target_ver);
5475          if (!is_error(ret)) {
5476              ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5477              if (is_error(ret)) {
5478                  unlock_drm_version(ver, target_ver, false);
5479              } else {
5480                  host_to_target_drmversion(target_ver, ver);
5481              }
5482          }
5483          unlock_user_struct(target_ver, arg, 0);
5484          return ret;
5485      }
5486      return -TARGET_ENOSYS;
5487  }
5488  
5489  static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5490                                             struct drm_i915_getparam *gparam,
5491                                             int fd, abi_long arg)
5492  {
5493      abi_long ret;
5494      int value;
5495      struct target_drm_i915_getparam *target_gparam;
5496  
5497      if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5498          return -TARGET_EFAULT;
5499      }
5500  
5501      __get_user(gparam->param, &target_gparam->param);
5502      gparam->value = &value;
5503      ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5504      put_user_s32(value, target_gparam->value);
5505  
5506      unlock_user_struct(target_gparam, arg, 0);
5507      return ret;
5508  }
5509  
5510  static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5511                                    int fd, int cmd, abi_long arg)
5512  {
5513      switch (ie->host_cmd) {
5514      case DRM_IOCTL_I915_GETPARAM:
5515          return do_ioctl_drm_i915_getparam(ie,
5516                                            (struct drm_i915_getparam *)buf_temp,
5517                                            fd, arg);
5518      default:
5519          return -TARGET_ENOSYS;
5520      }
5521  }
5522  
5523  #endif
5524  
5525  static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5526                                          int fd, int cmd, abi_long arg)
5527  {
5528      struct tun_filter *filter = (struct tun_filter *)buf_temp;
5529      struct tun_filter *target_filter;
5530      char *target_addr;
5531  
5532      assert(ie->access == IOC_W);
5533  
5534      target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5535      if (!target_filter) {
5536          return -TARGET_EFAULT;
5537      }
5538      filter->flags = tswap16(target_filter->flags);
5539      filter->count = tswap16(target_filter->count);
5540      unlock_user(target_filter, arg, 0);
5541  
5542      if (filter->count) {
5543          if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5544              MAX_STRUCT_SIZE) {
5545              return -TARGET_EFAULT;
5546          }
5547  
5548          target_addr = lock_user(VERIFY_READ,
5549                                  arg + offsetof(struct tun_filter, addr),
5550                                  filter->count * ETH_ALEN, 1);
5551          if (!target_addr) {
5552              return -TARGET_EFAULT;
5553          }
5554          memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5555          unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5556      }
5557  
5558      return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5559  }
5560  
5561  IOCTLEntry ioctl_entries[] = {
5562  #define IOCTL(cmd, access, ...) \
5563      { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5564  #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5565      { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5566  #define IOCTL_IGNORE(cmd) \
5567      { TARGET_ ## cmd, 0, #cmd },
5568  #include "ioctls.h"
5569      { 0, 0, },
5570  };
5571  
5572  /* ??? Implement proper locking for ioctls.  */
5573  /* do_ioctl() Must return target values and target errnos. */
5574  static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5575  {
5576      const IOCTLEntry *ie;
5577      const argtype *arg_type;
5578      abi_long ret;
5579      uint8_t buf_temp[MAX_STRUCT_SIZE];
5580      int target_size;
5581      void *argptr;
5582  
5583      ie = ioctl_entries;
5584      for(;;) {
5585          if (ie->target_cmd == 0) {
5586              qemu_log_mask(
5587                  LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5588              return -TARGET_ENOTTY;
5589          }
5590          if (ie->target_cmd == cmd)
5591              break;
5592          ie++;
5593      }
5594      arg_type = ie->arg_type;
5595      if (ie->do_ioctl) {
5596          return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5597      } else if (!ie->host_cmd) {
5598          /* Some architectures define BSD ioctls in their headers
5599             that are not implemented in Linux.  */
5600          return -TARGET_ENOTTY;
5601      }
5602  
5603      switch(arg_type[0]) {
5604      case TYPE_NULL:
5605          /* no argument */
5606          ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5607          break;
5608      case TYPE_PTRVOID:
5609      case TYPE_INT:
5610      case TYPE_LONG:
5611      case TYPE_ULONG:
5612          ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5613          break;
5614      case TYPE_PTR:
5615          arg_type++;
5616          target_size = thunk_type_size(arg_type, 0);
5617          switch(ie->access) {
5618          case IOC_R:
5619              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5620              if (!is_error(ret)) {
5621                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5622                  if (!argptr)
5623                      return -TARGET_EFAULT;
5624                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5625                  unlock_user(argptr, arg, target_size);
5626              }
5627              break;
5628          case IOC_W:
5629              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5630              if (!argptr)
5631                  return -TARGET_EFAULT;
5632              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5633              unlock_user(argptr, arg, 0);
5634              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5635              break;
5636          default:
5637          case IOC_RW:
5638              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5639              if (!argptr)
5640                  return -TARGET_EFAULT;
5641              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5642              unlock_user(argptr, arg, 0);
5643              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5644              if (!is_error(ret)) {
5645                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5646                  if (!argptr)
5647                      return -TARGET_EFAULT;
5648                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5649                  unlock_user(argptr, arg, target_size);
5650              }
5651              break;
5652          }
5653          break;
5654      default:
5655          qemu_log_mask(LOG_UNIMP,
5656                        "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5657                        (long)cmd, arg_type[0]);
5658          ret = -TARGET_ENOTTY;
5659          break;
5660      }
5661      return ret;
5662  }
5663  
5664  static const bitmask_transtbl iflag_tbl[] = {
5665          { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5666          { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5667          { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5668          { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5669          { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5670          { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5671          { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5672          { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5673          { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5674          { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5675          { TARGET_IXON, TARGET_IXON, IXON, IXON },
5676          { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5677          { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5678          { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5679          { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5680  };
5681  
5682  static const bitmask_transtbl oflag_tbl[] = {
5683  	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5684  	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5685  	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5686  	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5687  	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5688  	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5689  	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5690  	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5691  	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5692  	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5693  	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5694  	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5695  	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5696  	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5697  	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5698  	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5699  	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5700  	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5701  	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5702  	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5703  	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5704  	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5705  	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5706  	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5707  };
5708  
5709  static const bitmask_transtbl cflag_tbl[] = {
5710  	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5711  	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5712  	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5713  	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5714  	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5715  	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5716  	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5717  	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5718  	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5719  	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5720  	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5721  	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5722  	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5723  	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5724  	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5725  	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5726  	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5727  	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5728  	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5729  	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5730  	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5731  	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5732  	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5733  	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5734  	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5735  	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5736  	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5737  	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5738  	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5739  	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5740  	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5741  };
5742  
5743  static const bitmask_transtbl lflag_tbl[] = {
5744    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5745    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5746    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5747    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5748    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5749    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5750    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5751    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5752    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5753    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5754    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5755    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5756    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5757    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5758    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5759    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5760  };
5761  
5762  static void target_to_host_termios (void *dst, const void *src)
5763  {
5764      struct host_termios *host = dst;
5765      const struct target_termios *target = src;
5766  
5767      host->c_iflag =
5768          target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5769      host->c_oflag =
5770          target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5771      host->c_cflag =
5772          target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5773      host->c_lflag =
5774          target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5775      host->c_line = target->c_line;
5776  
5777      memset(host->c_cc, 0, sizeof(host->c_cc));
5778      host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5779      host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5780      host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5781      host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5782      host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5783      host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5784      host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5785      host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5786      host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5787      host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5788      host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5789      host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5790      host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5791      host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5792      host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5793      host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5794      host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5795  }
5796  
5797  static void host_to_target_termios (void *dst, const void *src)
5798  {
5799      struct target_termios *target = dst;
5800      const struct host_termios *host = src;
5801  
5802      target->c_iflag =
5803          tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5804      target->c_oflag =
5805          tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5806      target->c_cflag =
5807          tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5808      target->c_lflag =
5809          tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5810      target->c_line = host->c_line;
5811  
5812      memset(target->c_cc, 0, sizeof(target->c_cc));
5813      target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5814      target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5815      target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5816      target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5817      target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5818      target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5819      target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5820      target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5821      target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5822      target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5823      target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5824      target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5825      target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5826      target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5827      target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5828      target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5829      target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5830  }
5831  
5832  static const StructEntry struct_termios_def = {
5833      .convert = { host_to_target_termios, target_to_host_termios },
5834      .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5835      .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5836      .print = print_termios,
5837  };
5838  
5839  /* If the host does not provide these bits, they may be safely discarded. */
5840  #ifndef MAP_SYNC
5841  #define MAP_SYNC 0
5842  #endif
5843  #ifndef MAP_UNINITIALIZED
5844  #define MAP_UNINITIALIZED 0
5845  #endif
5846  
5847  static const bitmask_transtbl mmap_flags_tbl[] = {
5848      { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5849      { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5850        MAP_ANONYMOUS, MAP_ANONYMOUS },
5851      { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5852        MAP_GROWSDOWN, MAP_GROWSDOWN },
5853      { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5854        MAP_DENYWRITE, MAP_DENYWRITE },
5855      { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5856        MAP_EXECUTABLE, MAP_EXECUTABLE },
5857      { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5858      { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5859        MAP_NORESERVE, MAP_NORESERVE },
5860      { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5861      /* MAP_STACK had been ignored by the kernel for quite some time.
5862         Recognize it for the target insofar as we do not want to pass
5863         it through to the host.  */
5864      { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5865      { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5866      { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5867      { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5868        MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5869      { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5870        MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5871  };
5872  
5873  /*
5874   * Arrange for legacy / undefined architecture specific flags to be
5875   * ignored by mmap handling code.
5876   */
5877  #ifndef TARGET_MAP_32BIT
5878  #define TARGET_MAP_32BIT 0
5879  #endif
5880  #ifndef TARGET_MAP_HUGE_2MB
5881  #define TARGET_MAP_HUGE_2MB 0
5882  #endif
5883  #ifndef TARGET_MAP_HUGE_1GB
5884  #define TARGET_MAP_HUGE_1GB 0
5885  #endif
5886  
5887  static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5888                          int target_flags, int fd, off_t offset)
5889  {
5890      /*
5891       * The historical set of flags that all mmap types implicitly support.
5892       */
5893      enum {
5894          TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5895                                 | TARGET_MAP_PRIVATE
5896                                 | TARGET_MAP_FIXED
5897                                 | TARGET_MAP_ANONYMOUS
5898                                 | TARGET_MAP_DENYWRITE
5899                                 | TARGET_MAP_EXECUTABLE
5900                                 | TARGET_MAP_UNINITIALIZED
5901                                 | TARGET_MAP_GROWSDOWN
5902                                 | TARGET_MAP_LOCKED
5903                                 | TARGET_MAP_NORESERVE
5904                                 | TARGET_MAP_POPULATE
5905                                 | TARGET_MAP_NONBLOCK
5906                                 | TARGET_MAP_STACK
5907                                 | TARGET_MAP_HUGETLB
5908                                 | TARGET_MAP_32BIT
5909                                 | TARGET_MAP_HUGE_2MB
5910                                 | TARGET_MAP_HUGE_1GB
5911      };
5912      int host_flags;
5913  
5914      switch (target_flags & TARGET_MAP_TYPE) {
5915      case TARGET_MAP_PRIVATE:
5916          host_flags = MAP_PRIVATE;
5917          break;
5918      case TARGET_MAP_SHARED:
5919          host_flags = MAP_SHARED;
5920          break;
5921      case TARGET_MAP_SHARED_VALIDATE:
5922          /*
5923           * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5924           * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5925           */
5926          if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5927              return -TARGET_EOPNOTSUPP;
5928          }
5929          host_flags = MAP_SHARED_VALIDATE;
5930          if (target_flags & TARGET_MAP_SYNC) {
5931              host_flags |= MAP_SYNC;
5932          }
5933          break;
5934      default:
5935          return -TARGET_EINVAL;
5936      }
5937      host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5938  
5939      return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5940  }
5941  
5942  /*
5943   * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5944   *       TARGET_I386 is defined if TARGET_X86_64 is defined
5945   */
5946  #if defined(TARGET_I386)
5947  
5948  /* NOTE: there is really one LDT for all the threads */
5949  static uint8_t *ldt_table;
5950  
5951  static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5952  {
5953      int size;
5954      void *p;
5955  
5956      if (!ldt_table)
5957          return 0;
5958      size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5959      if (size > bytecount)
5960          size = bytecount;
5961      p = lock_user(VERIFY_WRITE, ptr, size, 0);
5962      if (!p)
5963          return -TARGET_EFAULT;
5964      /* ??? Should this by byteswapped?  */
5965      memcpy(p, ldt_table, size);
5966      unlock_user(p, ptr, size);
5967      return size;
5968  }
5969  
5970  /* XXX: add locking support */
5971  static abi_long write_ldt(CPUX86State *env,
5972                            abi_ulong ptr, unsigned long bytecount, int oldmode)
5973  {
5974      struct target_modify_ldt_ldt_s ldt_info;
5975      struct target_modify_ldt_ldt_s *target_ldt_info;
5976      int seg_32bit, contents, read_exec_only, limit_in_pages;
5977      int seg_not_present, useable, lm;
5978      uint32_t *lp, entry_1, entry_2;
5979  
5980      if (bytecount != sizeof(ldt_info))
5981          return -TARGET_EINVAL;
5982      if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5983          return -TARGET_EFAULT;
5984      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5985      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5986      ldt_info.limit = tswap32(target_ldt_info->limit);
5987      ldt_info.flags = tswap32(target_ldt_info->flags);
5988      unlock_user_struct(target_ldt_info, ptr, 0);
5989  
5990      if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5991          return -TARGET_EINVAL;
5992      seg_32bit = ldt_info.flags & 1;
5993      contents = (ldt_info.flags >> 1) & 3;
5994      read_exec_only = (ldt_info.flags >> 3) & 1;
5995      limit_in_pages = (ldt_info.flags >> 4) & 1;
5996      seg_not_present = (ldt_info.flags >> 5) & 1;
5997      useable = (ldt_info.flags >> 6) & 1;
5998  #ifdef TARGET_ABI32
5999      lm = 0;
6000  #else
6001      lm = (ldt_info.flags >> 7) & 1;
6002  #endif
6003      if (contents == 3) {
6004          if (oldmode)
6005              return -TARGET_EINVAL;
6006          if (seg_not_present == 0)
6007              return -TARGET_EINVAL;
6008      }
6009      /* allocate the LDT */
6010      if (!ldt_table) {
6011          env->ldt.base = target_mmap(0,
6012                                      TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6013                                      PROT_READ|PROT_WRITE,
6014                                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6015          if (env->ldt.base == -1)
6016              return -TARGET_ENOMEM;
6017          memset(g2h_untagged(env->ldt.base), 0,
6018                 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6019          env->ldt.limit = 0xffff;
6020          ldt_table = g2h_untagged(env->ldt.base);
6021      }
6022  
6023      /* NOTE: same code as Linux kernel */
6024      /* Allow LDTs to be cleared by the user. */
6025      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6026          if (oldmode ||
6027              (contents == 0		&&
6028               read_exec_only == 1	&&
6029               seg_32bit == 0		&&
6030               limit_in_pages == 0	&&
6031               seg_not_present == 1	&&
6032               useable == 0 )) {
6033              entry_1 = 0;
6034              entry_2 = 0;
6035              goto install;
6036          }
6037      }
6038  
6039      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6040          (ldt_info.limit & 0x0ffff);
6041      entry_2 = (ldt_info.base_addr & 0xff000000) |
6042          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6043          (ldt_info.limit & 0xf0000) |
6044          ((read_exec_only ^ 1) << 9) |
6045          (contents << 10) |
6046          ((seg_not_present ^ 1) << 15) |
6047          (seg_32bit << 22) |
6048          (limit_in_pages << 23) |
6049          (lm << 21) |
6050          0x7000;
6051      if (!oldmode)
6052          entry_2 |= (useable << 20);
6053  
6054      /* Install the new entry ...  */
6055  install:
6056      lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6057      lp[0] = tswap32(entry_1);
6058      lp[1] = tswap32(entry_2);
6059      return 0;
6060  }
6061  
6062  /* specific and weird i386 syscalls */
6063  static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6064                                unsigned long bytecount)
6065  {
6066      abi_long ret;
6067  
6068      switch (func) {
6069      case 0:
6070          ret = read_ldt(ptr, bytecount);
6071          break;
6072      case 1:
6073          ret = write_ldt(env, ptr, bytecount, 1);
6074          break;
6075      case 0x11:
6076          ret = write_ldt(env, ptr, bytecount, 0);
6077          break;
6078      default:
6079          ret = -TARGET_ENOSYS;
6080          break;
6081      }
6082      return ret;
6083  }
6084  
6085  #if defined(TARGET_ABI32)
6086  abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6087  {
6088      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6089      struct target_modify_ldt_ldt_s ldt_info;
6090      struct target_modify_ldt_ldt_s *target_ldt_info;
6091      int seg_32bit, contents, read_exec_only, limit_in_pages;
6092      int seg_not_present, useable, lm;
6093      uint32_t *lp, entry_1, entry_2;
6094      int i;
6095  
6096      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6097      if (!target_ldt_info)
6098          return -TARGET_EFAULT;
6099      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6100      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6101      ldt_info.limit = tswap32(target_ldt_info->limit);
6102      ldt_info.flags = tswap32(target_ldt_info->flags);
6103      if (ldt_info.entry_number == -1) {
6104          for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6105              if (gdt_table[i] == 0) {
6106                  ldt_info.entry_number = i;
6107                  target_ldt_info->entry_number = tswap32(i);
6108                  break;
6109              }
6110          }
6111      }
6112      unlock_user_struct(target_ldt_info, ptr, 1);
6113  
6114      if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6115          ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6116             return -TARGET_EINVAL;
6117      seg_32bit = ldt_info.flags & 1;
6118      contents = (ldt_info.flags >> 1) & 3;
6119      read_exec_only = (ldt_info.flags >> 3) & 1;
6120      limit_in_pages = (ldt_info.flags >> 4) & 1;
6121      seg_not_present = (ldt_info.flags >> 5) & 1;
6122      useable = (ldt_info.flags >> 6) & 1;
6123  #ifdef TARGET_ABI32
6124      lm = 0;
6125  #else
6126      lm = (ldt_info.flags >> 7) & 1;
6127  #endif
6128  
6129      if (contents == 3) {
6130          if (seg_not_present == 0)
6131              return -TARGET_EINVAL;
6132      }
6133  
6134      /* NOTE: same code as Linux kernel */
6135      /* Allow LDTs to be cleared by the user. */
6136      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6137          if ((contents == 0             &&
6138               read_exec_only == 1       &&
6139               seg_32bit == 0            &&
6140               limit_in_pages == 0       &&
6141               seg_not_present == 1      &&
6142               useable == 0 )) {
6143              entry_1 = 0;
6144              entry_2 = 0;
6145              goto install;
6146          }
6147      }
6148  
6149      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6150          (ldt_info.limit & 0x0ffff);
6151      entry_2 = (ldt_info.base_addr & 0xff000000) |
6152          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6153          (ldt_info.limit & 0xf0000) |
6154          ((read_exec_only ^ 1) << 9) |
6155          (contents << 10) |
6156          ((seg_not_present ^ 1) << 15) |
6157          (seg_32bit << 22) |
6158          (limit_in_pages << 23) |
6159          (useable << 20) |
6160          (lm << 21) |
6161          0x7000;
6162  
6163      /* Install the new entry ...  */
6164  install:
6165      lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6166      lp[0] = tswap32(entry_1);
6167      lp[1] = tswap32(entry_2);
6168      return 0;
6169  }
6170  
6171  static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6172  {
6173      struct target_modify_ldt_ldt_s *target_ldt_info;
6174      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6175      uint32_t base_addr, limit, flags;
6176      int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6177      int seg_not_present, useable, lm;
6178      uint32_t *lp, entry_1, entry_2;
6179  
6180      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6181      if (!target_ldt_info)
6182          return -TARGET_EFAULT;
6183      idx = tswap32(target_ldt_info->entry_number);
6184      if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6185          idx > TARGET_GDT_ENTRY_TLS_MAX) {
6186          unlock_user_struct(target_ldt_info, ptr, 1);
6187          return -TARGET_EINVAL;
6188      }
6189      lp = (uint32_t *)(gdt_table + idx);
6190      entry_1 = tswap32(lp[0]);
6191      entry_2 = tswap32(lp[1]);
6192  
6193      read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6194      contents = (entry_2 >> 10) & 3;
6195      seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6196      seg_32bit = (entry_2 >> 22) & 1;
6197      limit_in_pages = (entry_2 >> 23) & 1;
6198      useable = (entry_2 >> 20) & 1;
6199  #ifdef TARGET_ABI32
6200      lm = 0;
6201  #else
6202      lm = (entry_2 >> 21) & 1;
6203  #endif
6204      flags = (seg_32bit << 0) | (contents << 1) |
6205          (read_exec_only << 3) | (limit_in_pages << 4) |
6206          (seg_not_present << 5) | (useable << 6) | (lm << 7);
6207      limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6208      base_addr = (entry_1 >> 16) |
6209          (entry_2 & 0xff000000) |
6210          ((entry_2 & 0xff) << 16);
6211      target_ldt_info->base_addr = tswapal(base_addr);
6212      target_ldt_info->limit = tswap32(limit);
6213      target_ldt_info->flags = tswap32(flags);
6214      unlock_user_struct(target_ldt_info, ptr, 1);
6215      return 0;
6216  }
6217  
6218  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6219  {
6220      return -TARGET_ENOSYS;
6221  }
6222  #else
6223  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6224  {
6225      abi_long ret = 0;
6226      abi_ulong val;
6227      int idx;
6228  
6229      switch(code) {
6230      case TARGET_ARCH_SET_GS:
6231      case TARGET_ARCH_SET_FS:
6232          if (code == TARGET_ARCH_SET_GS)
6233              idx = R_GS;
6234          else
6235              idx = R_FS;
6236          cpu_x86_load_seg(env, idx, 0);
6237          env->segs[idx].base = addr;
6238          break;
6239      case TARGET_ARCH_GET_GS:
6240      case TARGET_ARCH_GET_FS:
6241          if (code == TARGET_ARCH_GET_GS)
6242              idx = R_GS;
6243          else
6244              idx = R_FS;
6245          val = env->segs[idx].base;
6246          if (put_user(val, addr, abi_ulong))
6247              ret = -TARGET_EFAULT;
6248          break;
6249      default:
6250          ret = -TARGET_EINVAL;
6251          break;
6252      }
6253      return ret;
6254  }
6255  #endif /* defined(TARGET_ABI32 */
6256  #endif /* defined(TARGET_I386) */
6257  
6258  /*
6259   * These constants are generic.  Supply any that are missing from the host.
6260   */
6261  #ifndef PR_SET_NAME
6262  # define PR_SET_NAME    15
6263  # define PR_GET_NAME    16
6264  #endif
6265  #ifndef PR_SET_FP_MODE
6266  # define PR_SET_FP_MODE 45
6267  # define PR_GET_FP_MODE 46
6268  # define PR_FP_MODE_FR   (1 << 0)
6269  # define PR_FP_MODE_FRE  (1 << 1)
6270  #endif
6271  #ifndef PR_SVE_SET_VL
6272  # define PR_SVE_SET_VL  50
6273  # define PR_SVE_GET_VL  51
6274  # define PR_SVE_VL_LEN_MASK  0xffff
6275  # define PR_SVE_VL_INHERIT   (1 << 17)
6276  #endif
6277  #ifndef PR_PAC_RESET_KEYS
6278  # define PR_PAC_RESET_KEYS  54
6279  # define PR_PAC_APIAKEY   (1 << 0)
6280  # define PR_PAC_APIBKEY   (1 << 1)
6281  # define PR_PAC_APDAKEY   (1 << 2)
6282  # define PR_PAC_APDBKEY   (1 << 3)
6283  # define PR_PAC_APGAKEY   (1 << 4)
6284  #endif
6285  #ifndef PR_SET_TAGGED_ADDR_CTRL
6286  # define PR_SET_TAGGED_ADDR_CTRL 55
6287  # define PR_GET_TAGGED_ADDR_CTRL 56
6288  # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6289  #endif
6290  #ifndef PR_MTE_TCF_SHIFT
6291  # define PR_MTE_TCF_SHIFT       1
6292  # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6293  # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6294  # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6295  # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6296  # define PR_MTE_TAG_SHIFT       3
6297  # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6298  #endif
6299  #ifndef PR_SET_IO_FLUSHER
6300  # define PR_SET_IO_FLUSHER 57
6301  # define PR_GET_IO_FLUSHER 58
6302  #endif
6303  #ifndef PR_SET_SYSCALL_USER_DISPATCH
6304  # define PR_SET_SYSCALL_USER_DISPATCH 59
6305  #endif
6306  #ifndef PR_SME_SET_VL
6307  # define PR_SME_SET_VL  63
6308  # define PR_SME_GET_VL  64
6309  # define PR_SME_VL_LEN_MASK  0xffff
6310  # define PR_SME_VL_INHERIT   (1 << 17)
6311  #endif
6312  
6313  #include "target_prctl.h"
6314  
6315  static abi_long do_prctl_inval0(CPUArchState *env)
6316  {
6317      return -TARGET_EINVAL;
6318  }
6319  
6320  static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6321  {
6322      return -TARGET_EINVAL;
6323  }
6324  
6325  #ifndef do_prctl_get_fp_mode
6326  #define do_prctl_get_fp_mode do_prctl_inval0
6327  #endif
6328  #ifndef do_prctl_set_fp_mode
6329  #define do_prctl_set_fp_mode do_prctl_inval1
6330  #endif
6331  #ifndef do_prctl_sve_get_vl
6332  #define do_prctl_sve_get_vl do_prctl_inval0
6333  #endif
6334  #ifndef do_prctl_sve_set_vl
6335  #define do_prctl_sve_set_vl do_prctl_inval1
6336  #endif
6337  #ifndef do_prctl_reset_keys
6338  #define do_prctl_reset_keys do_prctl_inval1
6339  #endif
6340  #ifndef do_prctl_set_tagged_addr_ctrl
6341  #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6342  #endif
6343  #ifndef do_prctl_get_tagged_addr_ctrl
6344  #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6345  #endif
6346  #ifndef do_prctl_get_unalign
6347  #define do_prctl_get_unalign do_prctl_inval1
6348  #endif
6349  #ifndef do_prctl_set_unalign
6350  #define do_prctl_set_unalign do_prctl_inval1
6351  #endif
6352  #ifndef do_prctl_sme_get_vl
6353  #define do_prctl_sme_get_vl do_prctl_inval0
6354  #endif
6355  #ifndef do_prctl_sme_set_vl
6356  #define do_prctl_sme_set_vl do_prctl_inval1
6357  #endif
6358  
6359  static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6360                           abi_long arg3, abi_long arg4, abi_long arg5)
6361  {
6362      abi_long ret;
6363  
6364      switch (option) {
6365      case PR_GET_PDEATHSIG:
6366          {
6367              int deathsig;
6368              ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6369                                    arg3, arg4, arg5));
6370              if (!is_error(ret) &&
6371                  put_user_s32(host_to_target_signal(deathsig), arg2)) {
6372                  return -TARGET_EFAULT;
6373              }
6374              return ret;
6375          }
6376      case PR_SET_PDEATHSIG:
6377          return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6378                                 arg3, arg4, arg5));
6379      case PR_GET_NAME:
6380          {
6381              void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6382              if (!name) {
6383                  return -TARGET_EFAULT;
6384              }
6385              ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6386                                    arg3, arg4, arg5));
6387              unlock_user(name, arg2, 16);
6388              return ret;
6389          }
6390      case PR_SET_NAME:
6391          {
6392              void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6393              if (!name) {
6394                  return -TARGET_EFAULT;
6395              }
6396              ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6397                                    arg3, arg4, arg5));
6398              unlock_user(name, arg2, 0);
6399              return ret;
6400          }
6401      case PR_GET_FP_MODE:
6402          return do_prctl_get_fp_mode(env);
6403      case PR_SET_FP_MODE:
6404          return do_prctl_set_fp_mode(env, arg2);
6405      case PR_SVE_GET_VL:
6406          return do_prctl_sve_get_vl(env);
6407      case PR_SVE_SET_VL:
6408          return do_prctl_sve_set_vl(env, arg2);
6409      case PR_SME_GET_VL:
6410          return do_prctl_sme_get_vl(env);
6411      case PR_SME_SET_VL:
6412          return do_prctl_sme_set_vl(env, arg2);
6413      case PR_PAC_RESET_KEYS:
6414          if (arg3 || arg4 || arg5) {
6415              return -TARGET_EINVAL;
6416          }
6417          return do_prctl_reset_keys(env, arg2);
6418      case PR_SET_TAGGED_ADDR_CTRL:
6419          if (arg3 || arg4 || arg5) {
6420              return -TARGET_EINVAL;
6421          }
6422          return do_prctl_set_tagged_addr_ctrl(env, arg2);
6423      case PR_GET_TAGGED_ADDR_CTRL:
6424          if (arg2 || arg3 || arg4 || arg5) {
6425              return -TARGET_EINVAL;
6426          }
6427          return do_prctl_get_tagged_addr_ctrl(env);
6428  
6429      case PR_GET_UNALIGN:
6430          return do_prctl_get_unalign(env, arg2);
6431      case PR_SET_UNALIGN:
6432          return do_prctl_set_unalign(env, arg2);
6433  
6434      case PR_CAP_AMBIENT:
6435      case PR_CAPBSET_READ:
6436      case PR_CAPBSET_DROP:
6437      case PR_GET_DUMPABLE:
6438      case PR_SET_DUMPABLE:
6439      case PR_GET_KEEPCAPS:
6440      case PR_SET_KEEPCAPS:
6441      case PR_GET_SECUREBITS:
6442      case PR_SET_SECUREBITS:
6443      case PR_GET_TIMING:
6444      case PR_SET_TIMING:
6445      case PR_GET_TIMERSLACK:
6446      case PR_SET_TIMERSLACK:
6447      case PR_MCE_KILL:
6448      case PR_MCE_KILL_GET:
6449      case PR_GET_NO_NEW_PRIVS:
6450      case PR_SET_NO_NEW_PRIVS:
6451      case PR_GET_IO_FLUSHER:
6452      case PR_SET_IO_FLUSHER:
6453      case PR_SET_CHILD_SUBREAPER:
6454      case PR_GET_SPECULATION_CTRL:
6455      case PR_SET_SPECULATION_CTRL:
6456          /* Some prctl options have no pointer arguments and we can pass on. */
6457          return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6458  
6459      case PR_GET_CHILD_SUBREAPER:
6460          {
6461              int val;
6462              ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6463                                    arg3, arg4, arg5));
6464              if (!is_error(ret) && put_user_s32(val, arg2)) {
6465                  return -TARGET_EFAULT;
6466              }
6467              return ret;
6468          }
6469  
6470      case PR_GET_TID_ADDRESS:
6471          {
6472              TaskState *ts = env_cpu(env)->opaque;
6473              return put_user_ual(ts->child_tidptr, arg2);
6474          }
6475  
6476      case PR_GET_FPEXC:
6477      case PR_SET_FPEXC:
6478          /* Was used for SPE on PowerPC. */
6479          return -TARGET_EINVAL;
6480  
6481      case PR_GET_ENDIAN:
6482      case PR_SET_ENDIAN:
6483      case PR_GET_FPEMU:
6484      case PR_SET_FPEMU:
6485      case PR_SET_MM:
6486      case PR_GET_SECCOMP:
6487      case PR_SET_SECCOMP:
6488      case PR_SET_SYSCALL_USER_DISPATCH:
6489      case PR_GET_THP_DISABLE:
6490      case PR_SET_THP_DISABLE:
6491      case PR_GET_TSC:
6492      case PR_SET_TSC:
6493          /* Disable to prevent the target disabling stuff we need. */
6494          return -TARGET_EINVAL;
6495  
6496      default:
6497          qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6498                        option);
6499          return -TARGET_EINVAL;
6500      }
6501  }
6502  
6503  #define NEW_STACK_SIZE 0x40000
6504  
6505  
6506  static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6507  typedef struct {
6508      CPUArchState *env;
6509      pthread_mutex_t mutex;
6510      pthread_cond_t cond;
6511      pthread_t thread;
6512      uint32_t tid;
6513      abi_ulong child_tidptr;
6514      abi_ulong parent_tidptr;
6515      sigset_t sigmask;
6516  } new_thread_info;
6517  
6518  static void *clone_func(void *arg)
6519  {
6520      new_thread_info *info = arg;
6521      CPUArchState *env;
6522      CPUState *cpu;
6523      TaskState *ts;
6524  
6525      rcu_register_thread();
6526      tcg_register_thread();
6527      env = info->env;
6528      cpu = env_cpu(env);
6529      thread_cpu = cpu;
6530      ts = get_task_state(cpu);
6531      info->tid = sys_gettid();
6532      task_settid(ts);
6533      if (info->child_tidptr)
6534          put_user_u32(info->tid, info->child_tidptr);
6535      if (info->parent_tidptr)
6536          put_user_u32(info->tid, info->parent_tidptr);
6537      qemu_guest_random_seed_thread_part2(cpu->random_seed);
6538      /* Enable signals.  */
6539      sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6540      /* Signal to the parent that we're ready.  */
6541      pthread_mutex_lock(&info->mutex);
6542      pthread_cond_broadcast(&info->cond);
6543      pthread_mutex_unlock(&info->mutex);
6544      /* Wait until the parent has finished initializing the tls state.  */
6545      pthread_mutex_lock(&clone_lock);
6546      pthread_mutex_unlock(&clone_lock);
6547      cpu_loop(env);
6548      /* never exits */
6549      return NULL;
6550  }
6551  
6552  /* do_fork() Must return host values and target errnos (unlike most
6553     do_*() functions). */
6554  static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6555                     abi_ulong parent_tidptr, target_ulong newtls,
6556                     abi_ulong child_tidptr)
6557  {
6558      CPUState *cpu = env_cpu(env);
6559      int ret;
6560      TaskState *ts;
6561      CPUState *new_cpu;
6562      CPUArchState *new_env;
6563      sigset_t sigmask;
6564  
6565      flags &= ~CLONE_IGNORED_FLAGS;
6566  
6567      /* Emulate vfork() with fork() */
6568      if (flags & CLONE_VFORK)
6569          flags &= ~(CLONE_VFORK | CLONE_VM);
6570  
6571      if (flags & CLONE_VM) {
6572          TaskState *parent_ts = get_task_state(cpu);
6573          new_thread_info info;
6574          pthread_attr_t attr;
6575  
6576          if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6577              (flags & CLONE_INVALID_THREAD_FLAGS)) {
6578              return -TARGET_EINVAL;
6579          }
6580  
6581          ts = g_new0(TaskState, 1);
6582          init_task_state(ts);
6583  
6584          /* Grab a mutex so that thread setup appears atomic.  */
6585          pthread_mutex_lock(&clone_lock);
6586  
6587          /*
6588           * If this is our first additional thread, we need to ensure we
6589           * generate code for parallel execution and flush old translations.
6590           * Do this now so that the copy gets CF_PARALLEL too.
6591           */
6592          if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6593              cpu->tcg_cflags |= CF_PARALLEL;
6594              tb_flush(cpu);
6595          }
6596  
6597          /* we create a new CPU instance. */
6598          new_env = cpu_copy(env);
6599          /* Init regs that differ from the parent.  */
6600          cpu_clone_regs_child(new_env, newsp, flags);
6601          cpu_clone_regs_parent(env, flags);
6602          new_cpu = env_cpu(new_env);
6603          new_cpu->opaque = ts;
6604          ts->bprm = parent_ts->bprm;
6605          ts->info = parent_ts->info;
6606          ts->signal_mask = parent_ts->signal_mask;
6607  
6608          if (flags & CLONE_CHILD_CLEARTID) {
6609              ts->child_tidptr = child_tidptr;
6610          }
6611  
6612          if (flags & CLONE_SETTLS) {
6613              cpu_set_tls (new_env, newtls);
6614          }
6615  
6616          memset(&info, 0, sizeof(info));
6617          pthread_mutex_init(&info.mutex, NULL);
6618          pthread_mutex_lock(&info.mutex);
6619          pthread_cond_init(&info.cond, NULL);
6620          info.env = new_env;
6621          if (flags & CLONE_CHILD_SETTID) {
6622              info.child_tidptr = child_tidptr;
6623          }
6624          if (flags & CLONE_PARENT_SETTID) {
6625              info.parent_tidptr = parent_tidptr;
6626          }
6627  
6628          ret = pthread_attr_init(&attr);
6629          ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6630          ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6631          /* It is not safe to deliver signals until the child has finished
6632             initializing, so temporarily block all signals.  */
6633          sigfillset(&sigmask);
6634          sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6635          cpu->random_seed = qemu_guest_random_seed_thread_part1();
6636  
6637          ret = pthread_create(&info.thread, &attr, clone_func, &info);
6638          /* TODO: Free new CPU state if thread creation failed.  */
6639  
6640          sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6641          pthread_attr_destroy(&attr);
6642          if (ret == 0) {
6643              /* Wait for the child to initialize.  */
6644              pthread_cond_wait(&info.cond, &info.mutex);
6645              ret = info.tid;
6646          } else {
6647              ret = -1;
6648          }
6649          pthread_mutex_unlock(&info.mutex);
6650          pthread_cond_destroy(&info.cond);
6651          pthread_mutex_destroy(&info.mutex);
6652          pthread_mutex_unlock(&clone_lock);
6653      } else {
6654          /* if no CLONE_VM, we consider it is a fork */
6655          if (flags & CLONE_INVALID_FORK_FLAGS) {
6656              return -TARGET_EINVAL;
6657          }
6658  
6659          /* We can't support custom termination signals */
6660          if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6661              return -TARGET_EINVAL;
6662          }
6663  
6664  #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6665          if (flags & CLONE_PIDFD) {
6666              return -TARGET_EINVAL;
6667          }
6668  #endif
6669  
6670          /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6671          if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6672              return -TARGET_EINVAL;
6673          }
6674  
6675          if (block_signals()) {
6676              return -QEMU_ERESTARTSYS;
6677          }
6678  
6679          fork_start();
6680          ret = fork();
6681          if (ret == 0) {
6682              /* Child Process.  */
6683              cpu_clone_regs_child(env, newsp, flags);
6684              fork_end(ret);
6685              /* There is a race condition here.  The parent process could
6686                 theoretically read the TID in the child process before the child
6687                 tid is set.  This would require using either ptrace
6688                 (not implemented) or having *_tidptr to point at a shared memory
6689                 mapping.  We can't repeat the spinlock hack used above because
6690                 the child process gets its own copy of the lock.  */
6691              if (flags & CLONE_CHILD_SETTID)
6692                  put_user_u32(sys_gettid(), child_tidptr);
6693              if (flags & CLONE_PARENT_SETTID)
6694                  put_user_u32(sys_gettid(), parent_tidptr);
6695              ts = get_task_state(cpu);
6696              if (flags & CLONE_SETTLS)
6697                  cpu_set_tls (env, newtls);
6698              if (flags & CLONE_CHILD_CLEARTID)
6699                  ts->child_tidptr = child_tidptr;
6700          } else {
6701              cpu_clone_regs_parent(env, flags);
6702              if (flags & CLONE_PIDFD) {
6703                  int pid_fd = 0;
6704  #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6705                  int pid_child = ret;
6706                  pid_fd = pidfd_open(pid_child, 0);
6707                  if (pid_fd >= 0) {
6708                          fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6709                                                 | FD_CLOEXEC);
6710                  } else {
6711                          pid_fd = 0;
6712                  }
6713  #endif
6714                  put_user_u32(pid_fd, parent_tidptr);
6715              }
6716              fork_end(ret);
6717          }
6718          g_assert(!cpu_in_exclusive_context(cpu));
6719      }
6720      return ret;
6721  }
6722  
6723  /* warning : doesn't handle linux specific flags... */
6724  static int target_to_host_fcntl_cmd(int cmd)
6725  {
6726      int ret;
6727  
6728      switch(cmd) {
6729      case TARGET_F_DUPFD:
6730      case TARGET_F_GETFD:
6731      case TARGET_F_SETFD:
6732      case TARGET_F_GETFL:
6733      case TARGET_F_SETFL:
6734      case TARGET_F_OFD_GETLK:
6735      case TARGET_F_OFD_SETLK:
6736      case TARGET_F_OFD_SETLKW:
6737          ret = cmd;
6738          break;
6739      case TARGET_F_GETLK:
6740          ret = F_GETLK64;
6741          break;
6742      case TARGET_F_SETLK:
6743          ret = F_SETLK64;
6744          break;
6745      case TARGET_F_SETLKW:
6746          ret = F_SETLKW64;
6747          break;
6748      case TARGET_F_GETOWN:
6749          ret = F_GETOWN;
6750          break;
6751      case TARGET_F_SETOWN:
6752          ret = F_SETOWN;
6753          break;
6754      case TARGET_F_GETSIG:
6755          ret = F_GETSIG;
6756          break;
6757      case TARGET_F_SETSIG:
6758          ret = F_SETSIG;
6759          break;
6760  #if TARGET_ABI_BITS == 32
6761      case TARGET_F_GETLK64:
6762          ret = F_GETLK64;
6763          break;
6764      case TARGET_F_SETLK64:
6765          ret = F_SETLK64;
6766          break;
6767      case TARGET_F_SETLKW64:
6768          ret = F_SETLKW64;
6769          break;
6770  #endif
6771      case TARGET_F_SETLEASE:
6772          ret = F_SETLEASE;
6773          break;
6774      case TARGET_F_GETLEASE:
6775          ret = F_GETLEASE;
6776          break;
6777  #ifdef F_DUPFD_CLOEXEC
6778      case TARGET_F_DUPFD_CLOEXEC:
6779          ret = F_DUPFD_CLOEXEC;
6780          break;
6781  #endif
6782      case TARGET_F_NOTIFY:
6783          ret = F_NOTIFY;
6784          break;
6785  #ifdef F_GETOWN_EX
6786      case TARGET_F_GETOWN_EX:
6787          ret = F_GETOWN_EX;
6788          break;
6789  #endif
6790  #ifdef F_SETOWN_EX
6791      case TARGET_F_SETOWN_EX:
6792          ret = F_SETOWN_EX;
6793          break;
6794  #endif
6795  #ifdef F_SETPIPE_SZ
6796      case TARGET_F_SETPIPE_SZ:
6797          ret = F_SETPIPE_SZ;
6798          break;
6799      case TARGET_F_GETPIPE_SZ:
6800          ret = F_GETPIPE_SZ;
6801          break;
6802  #endif
6803  #ifdef F_ADD_SEALS
6804      case TARGET_F_ADD_SEALS:
6805          ret = F_ADD_SEALS;
6806          break;
6807      case TARGET_F_GET_SEALS:
6808          ret = F_GET_SEALS;
6809          break;
6810  #endif
6811      default:
6812          ret = -TARGET_EINVAL;
6813          break;
6814      }
6815  
6816  #if defined(__powerpc64__)
6817      /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6818       * is not supported by kernel. The glibc fcntl call actually adjusts
6819       * them to 5, 6 and 7 before making the syscall(). Since we make the
6820       * syscall directly, adjust to what is supported by the kernel.
6821       */
6822      if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6823          ret -= F_GETLK64 - 5;
6824      }
6825  #endif
6826  
6827      return ret;
6828  }
6829  
6830  #define FLOCK_TRANSTBL \
6831      switch (type) { \
6832      TRANSTBL_CONVERT(F_RDLCK); \
6833      TRANSTBL_CONVERT(F_WRLCK); \
6834      TRANSTBL_CONVERT(F_UNLCK); \
6835      }
6836  
6837  static int target_to_host_flock(int type)
6838  {
6839  #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6840      FLOCK_TRANSTBL
6841  #undef  TRANSTBL_CONVERT
6842      return -TARGET_EINVAL;
6843  }
6844  
6845  static int host_to_target_flock(int type)
6846  {
6847  #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6848      FLOCK_TRANSTBL
6849  #undef  TRANSTBL_CONVERT
6850      /* if we don't know how to convert the value coming
6851       * from the host we copy to the target field as-is
6852       */
6853      return type;
6854  }
6855  
6856  static inline abi_long copy_from_user_flock(struct flock64 *fl,
6857                                              abi_ulong target_flock_addr)
6858  {
6859      struct target_flock *target_fl;
6860      int l_type;
6861  
6862      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6863          return -TARGET_EFAULT;
6864      }
6865  
6866      __get_user(l_type, &target_fl->l_type);
6867      l_type = target_to_host_flock(l_type);
6868      if (l_type < 0) {
6869          return l_type;
6870      }
6871      fl->l_type = l_type;
6872      __get_user(fl->l_whence, &target_fl->l_whence);
6873      __get_user(fl->l_start, &target_fl->l_start);
6874      __get_user(fl->l_len, &target_fl->l_len);
6875      __get_user(fl->l_pid, &target_fl->l_pid);
6876      unlock_user_struct(target_fl, target_flock_addr, 0);
6877      return 0;
6878  }
6879  
6880  static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6881                                            const struct flock64 *fl)
6882  {
6883      struct target_flock *target_fl;
6884      short l_type;
6885  
6886      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6887          return -TARGET_EFAULT;
6888      }
6889  
6890      l_type = host_to_target_flock(fl->l_type);
6891      __put_user(l_type, &target_fl->l_type);
6892      __put_user(fl->l_whence, &target_fl->l_whence);
6893      __put_user(fl->l_start, &target_fl->l_start);
6894      __put_user(fl->l_len, &target_fl->l_len);
6895      __put_user(fl->l_pid, &target_fl->l_pid);
6896      unlock_user_struct(target_fl, target_flock_addr, 1);
6897      return 0;
6898  }
6899  
6900  typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6901  typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6902  
6903  #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6904  struct target_oabi_flock64 {
6905      abi_short l_type;
6906      abi_short l_whence;
6907      abi_llong l_start;
6908      abi_llong l_len;
6909      abi_int   l_pid;
6910  } QEMU_PACKED;
6911  
6912  static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6913                                                     abi_ulong target_flock_addr)
6914  {
6915      struct target_oabi_flock64 *target_fl;
6916      int l_type;
6917  
6918      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6919          return -TARGET_EFAULT;
6920      }
6921  
6922      __get_user(l_type, &target_fl->l_type);
6923      l_type = target_to_host_flock(l_type);
6924      if (l_type < 0) {
6925          return l_type;
6926      }
6927      fl->l_type = l_type;
6928      __get_user(fl->l_whence, &target_fl->l_whence);
6929      __get_user(fl->l_start, &target_fl->l_start);
6930      __get_user(fl->l_len, &target_fl->l_len);
6931      __get_user(fl->l_pid, &target_fl->l_pid);
6932      unlock_user_struct(target_fl, target_flock_addr, 0);
6933      return 0;
6934  }
6935  
6936  static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6937                                                   const struct flock64 *fl)
6938  {
6939      struct target_oabi_flock64 *target_fl;
6940      short l_type;
6941  
6942      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6943          return -TARGET_EFAULT;
6944      }
6945  
6946      l_type = host_to_target_flock(fl->l_type);
6947      __put_user(l_type, &target_fl->l_type);
6948      __put_user(fl->l_whence, &target_fl->l_whence);
6949      __put_user(fl->l_start, &target_fl->l_start);
6950      __put_user(fl->l_len, &target_fl->l_len);
6951      __put_user(fl->l_pid, &target_fl->l_pid);
6952      unlock_user_struct(target_fl, target_flock_addr, 1);
6953      return 0;
6954  }
6955  #endif
6956  
6957  static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6958                                                abi_ulong target_flock_addr)
6959  {
6960      struct target_flock64 *target_fl;
6961      int l_type;
6962  
6963      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6964          return -TARGET_EFAULT;
6965      }
6966  
6967      __get_user(l_type, &target_fl->l_type);
6968      l_type = target_to_host_flock(l_type);
6969      if (l_type < 0) {
6970          return l_type;
6971      }
6972      fl->l_type = l_type;
6973      __get_user(fl->l_whence, &target_fl->l_whence);
6974      __get_user(fl->l_start, &target_fl->l_start);
6975      __get_user(fl->l_len, &target_fl->l_len);
6976      __get_user(fl->l_pid, &target_fl->l_pid);
6977      unlock_user_struct(target_fl, target_flock_addr, 0);
6978      return 0;
6979  }
6980  
6981  static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6982                                              const struct flock64 *fl)
6983  {
6984      struct target_flock64 *target_fl;
6985      short l_type;
6986  
6987      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6988          return -TARGET_EFAULT;
6989      }
6990  
6991      l_type = host_to_target_flock(fl->l_type);
6992      __put_user(l_type, &target_fl->l_type);
6993      __put_user(fl->l_whence, &target_fl->l_whence);
6994      __put_user(fl->l_start, &target_fl->l_start);
6995      __put_user(fl->l_len, &target_fl->l_len);
6996      __put_user(fl->l_pid, &target_fl->l_pid);
6997      unlock_user_struct(target_fl, target_flock_addr, 1);
6998      return 0;
6999  }
7000  
7001  static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7002  {
7003      struct flock64 fl64;
7004  #ifdef F_GETOWN_EX
7005      struct f_owner_ex fox;
7006      struct target_f_owner_ex *target_fox;
7007  #endif
7008      abi_long ret;
7009      int host_cmd = target_to_host_fcntl_cmd(cmd);
7010  
7011      if (host_cmd == -TARGET_EINVAL)
7012  	    return host_cmd;
7013  
7014      switch(cmd) {
7015      case TARGET_F_GETLK:
7016          ret = copy_from_user_flock(&fl64, arg);
7017          if (ret) {
7018              return ret;
7019          }
7020          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7021          if (ret == 0) {
7022              ret = copy_to_user_flock(arg, &fl64);
7023          }
7024          break;
7025  
7026      case TARGET_F_SETLK:
7027      case TARGET_F_SETLKW:
7028          ret = copy_from_user_flock(&fl64, arg);
7029          if (ret) {
7030              return ret;
7031          }
7032          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7033          break;
7034  
7035      case TARGET_F_GETLK64:
7036      case TARGET_F_OFD_GETLK:
7037          ret = copy_from_user_flock64(&fl64, arg);
7038          if (ret) {
7039              return ret;
7040          }
7041          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7042          if (ret == 0) {
7043              ret = copy_to_user_flock64(arg, &fl64);
7044          }
7045          break;
7046      case TARGET_F_SETLK64:
7047      case TARGET_F_SETLKW64:
7048      case TARGET_F_OFD_SETLK:
7049      case TARGET_F_OFD_SETLKW:
7050          ret = copy_from_user_flock64(&fl64, arg);
7051          if (ret) {
7052              return ret;
7053          }
7054          ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7055          break;
7056  
7057      case TARGET_F_GETFL:
7058          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7059          if (ret >= 0) {
7060              ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7061              /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7062              if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7063                  ret |= TARGET_O_LARGEFILE;
7064              }
7065          }
7066          break;
7067  
7068      case TARGET_F_SETFL:
7069          ret = get_errno(safe_fcntl(fd, host_cmd,
7070                                     target_to_host_bitmask(arg,
7071                                                            fcntl_flags_tbl)));
7072          break;
7073  
7074  #ifdef F_GETOWN_EX
7075      case TARGET_F_GETOWN_EX:
7076          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7077          if (ret >= 0) {
7078              if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7079                  return -TARGET_EFAULT;
7080              target_fox->type = tswap32(fox.type);
7081              target_fox->pid = tswap32(fox.pid);
7082              unlock_user_struct(target_fox, arg, 1);
7083          }
7084          break;
7085  #endif
7086  
7087  #ifdef F_SETOWN_EX
7088      case TARGET_F_SETOWN_EX:
7089          if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7090              return -TARGET_EFAULT;
7091          fox.type = tswap32(target_fox->type);
7092          fox.pid = tswap32(target_fox->pid);
7093          unlock_user_struct(target_fox, arg, 0);
7094          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7095          break;
7096  #endif
7097  
7098      case TARGET_F_SETSIG:
7099          ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7100          break;
7101  
7102      case TARGET_F_GETSIG:
7103          ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7104          break;
7105  
7106      case TARGET_F_SETOWN:
7107      case TARGET_F_GETOWN:
7108      case TARGET_F_SETLEASE:
7109      case TARGET_F_GETLEASE:
7110      case TARGET_F_SETPIPE_SZ:
7111      case TARGET_F_GETPIPE_SZ:
7112      case TARGET_F_ADD_SEALS:
7113      case TARGET_F_GET_SEALS:
7114          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7115          break;
7116  
7117      default:
7118          ret = get_errno(safe_fcntl(fd, cmd, arg));
7119          break;
7120      }
7121      return ret;
7122  }
7123  
7124  #ifdef USE_UID16
7125  
7126  static inline int high2lowuid(int uid)
7127  {
7128      if (uid > 65535)
7129          return 65534;
7130      else
7131          return uid;
7132  }
7133  
7134  static inline int high2lowgid(int gid)
7135  {
7136      if (gid > 65535)
7137          return 65534;
7138      else
7139          return gid;
7140  }
7141  
7142  static inline int low2highuid(int uid)
7143  {
7144      if ((int16_t)uid == -1)
7145          return -1;
7146      else
7147          return uid;
7148  }
7149  
7150  static inline int low2highgid(int gid)
7151  {
7152      if ((int16_t)gid == -1)
7153          return -1;
7154      else
7155          return gid;
7156  }
7157  static inline int tswapid(int id)
7158  {
7159      return tswap16(id);
7160  }
7161  
7162  #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7163  
7164  #else /* !USE_UID16 */
7165  static inline int high2lowuid(int uid)
7166  {
7167      return uid;
7168  }
7169  static inline int high2lowgid(int gid)
7170  {
7171      return gid;
7172  }
7173  static inline int low2highuid(int uid)
7174  {
7175      return uid;
7176  }
7177  static inline int low2highgid(int gid)
7178  {
7179      return gid;
7180  }
7181  static inline int tswapid(int id)
7182  {
7183      return tswap32(id);
7184  }
7185  
7186  #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7187  
7188  #endif /* USE_UID16 */
7189  
7190  /* We must do direct syscalls for setting UID/GID, because we want to
7191   * implement the Linux system call semantics of "change only for this thread",
7192   * not the libc/POSIX semantics of "change for all threads in process".
7193   * (See http://ewontfix.com/17/ for more details.)
7194   * We use the 32-bit version of the syscalls if present; if it is not
7195   * then either the host architecture supports 32-bit UIDs natively with
7196   * the standard syscall, or the 16-bit UID is the best we can do.
7197   */
7198  #ifdef __NR_setuid32
7199  #define __NR_sys_setuid __NR_setuid32
7200  #else
7201  #define __NR_sys_setuid __NR_setuid
7202  #endif
7203  #ifdef __NR_setgid32
7204  #define __NR_sys_setgid __NR_setgid32
7205  #else
7206  #define __NR_sys_setgid __NR_setgid
7207  #endif
7208  #ifdef __NR_setresuid32
7209  #define __NR_sys_setresuid __NR_setresuid32
7210  #else
7211  #define __NR_sys_setresuid __NR_setresuid
7212  #endif
7213  #ifdef __NR_setresgid32
7214  #define __NR_sys_setresgid __NR_setresgid32
7215  #else
7216  #define __NR_sys_setresgid __NR_setresgid
7217  #endif
7218  
7219  _syscall1(int, sys_setuid, uid_t, uid)
7220  _syscall1(int, sys_setgid, gid_t, gid)
7221  _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7222  _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7223  
7224  void syscall_init(void)
7225  {
7226      IOCTLEntry *ie;
7227      const argtype *arg_type;
7228      int size;
7229  
7230      thunk_init(STRUCT_MAX);
7231  
7232  #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7233  #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7234  #include "syscall_types.h"
7235  #undef STRUCT
7236  #undef STRUCT_SPECIAL
7237  
7238      /* we patch the ioctl size if necessary. We rely on the fact that
7239         no ioctl has all the bits at '1' in the size field */
7240      ie = ioctl_entries;
7241      while (ie->target_cmd != 0) {
7242          if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7243              TARGET_IOC_SIZEMASK) {
7244              arg_type = ie->arg_type;
7245              if (arg_type[0] != TYPE_PTR) {
7246                  fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7247                          ie->target_cmd);
7248                  exit(1);
7249              }
7250              arg_type++;
7251              size = thunk_type_size(arg_type, 0);
7252              ie->target_cmd = (ie->target_cmd &
7253                                ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7254                  (size << TARGET_IOC_SIZESHIFT);
7255          }
7256  
7257          /* automatic consistency check if same arch */
7258  #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7259      (defined(__x86_64__) && defined(TARGET_X86_64))
7260          if (unlikely(ie->target_cmd != ie->host_cmd)) {
7261              fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7262                      ie->name, ie->target_cmd, ie->host_cmd);
7263          }
7264  #endif
7265          ie++;
7266      }
7267  }
7268  
7269  #ifdef TARGET_NR_truncate64
7270  static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7271                                           abi_long arg2,
7272                                           abi_long arg3,
7273                                           abi_long arg4)
7274  {
7275      if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7276          arg2 = arg3;
7277          arg3 = arg4;
7278      }
7279      return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7280  }
7281  #endif
7282  
7283  #ifdef TARGET_NR_ftruncate64
7284  static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7285                                            abi_long arg2,
7286                                            abi_long arg3,
7287                                            abi_long arg4)
7288  {
7289      if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7290          arg2 = arg3;
7291          arg3 = arg4;
7292      }
7293      return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7294  }
7295  #endif
7296  
7297  #if defined(TARGET_NR_timer_settime) || \
7298      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7299  static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7300                                                   abi_ulong target_addr)
7301  {
7302      if (target_to_host_timespec(&host_its->it_interval, target_addr +
7303                                  offsetof(struct target_itimerspec,
7304                                           it_interval)) ||
7305          target_to_host_timespec(&host_its->it_value, target_addr +
7306                                  offsetof(struct target_itimerspec,
7307                                           it_value))) {
7308          return -TARGET_EFAULT;
7309      }
7310  
7311      return 0;
7312  }
7313  #endif
7314  
7315  #if defined(TARGET_NR_timer_settime64) || \
7316      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7317  static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7318                                                     abi_ulong target_addr)
7319  {
7320      if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7321                                    offsetof(struct target__kernel_itimerspec,
7322                                             it_interval)) ||
7323          target_to_host_timespec64(&host_its->it_value, target_addr +
7324                                    offsetof(struct target__kernel_itimerspec,
7325                                             it_value))) {
7326          return -TARGET_EFAULT;
7327      }
7328  
7329      return 0;
7330  }
7331  #endif
7332  
7333  #if ((defined(TARGET_NR_timerfd_gettime) || \
7334        defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7335        defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7336  static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7337                                                   struct itimerspec *host_its)
7338  {
7339      if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7340                                                         it_interval),
7341                                  &host_its->it_interval) ||
7342          host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7343                                                         it_value),
7344                                  &host_its->it_value)) {
7345          return -TARGET_EFAULT;
7346      }
7347      return 0;
7348  }
7349  #endif
7350  
7351  #if ((defined(TARGET_NR_timerfd_gettime64) || \
7352        defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7353        defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7354  static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7355                                                     struct itimerspec *host_its)
7356  {
7357      if (host_to_target_timespec64(target_addr +
7358                                    offsetof(struct target__kernel_itimerspec,
7359                                             it_interval),
7360                                    &host_its->it_interval) ||
7361          host_to_target_timespec64(target_addr +
7362                                    offsetof(struct target__kernel_itimerspec,
7363                                             it_value),
7364                                    &host_its->it_value)) {
7365          return -TARGET_EFAULT;
7366      }
7367      return 0;
7368  }
7369  #endif
7370  
7371  #if defined(TARGET_NR_adjtimex) || \
7372      (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7373  static inline abi_long target_to_host_timex(struct timex *host_tx,
7374                                              abi_long target_addr)
7375  {
7376      struct target_timex *target_tx;
7377  
7378      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7379          return -TARGET_EFAULT;
7380      }
7381  
7382      __get_user(host_tx->modes, &target_tx->modes);
7383      __get_user(host_tx->offset, &target_tx->offset);
7384      __get_user(host_tx->freq, &target_tx->freq);
7385      __get_user(host_tx->maxerror, &target_tx->maxerror);
7386      __get_user(host_tx->esterror, &target_tx->esterror);
7387      __get_user(host_tx->status, &target_tx->status);
7388      __get_user(host_tx->constant, &target_tx->constant);
7389      __get_user(host_tx->precision, &target_tx->precision);
7390      __get_user(host_tx->tolerance, &target_tx->tolerance);
7391      __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7392      __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7393      __get_user(host_tx->tick, &target_tx->tick);
7394      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7395      __get_user(host_tx->jitter, &target_tx->jitter);
7396      __get_user(host_tx->shift, &target_tx->shift);
7397      __get_user(host_tx->stabil, &target_tx->stabil);
7398      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7399      __get_user(host_tx->calcnt, &target_tx->calcnt);
7400      __get_user(host_tx->errcnt, &target_tx->errcnt);
7401      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7402      __get_user(host_tx->tai, &target_tx->tai);
7403  
7404      unlock_user_struct(target_tx, target_addr, 0);
7405      return 0;
7406  }
7407  
7408  static inline abi_long host_to_target_timex(abi_long target_addr,
7409                                              struct timex *host_tx)
7410  {
7411      struct target_timex *target_tx;
7412  
7413      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7414          return -TARGET_EFAULT;
7415      }
7416  
7417      __put_user(host_tx->modes, &target_tx->modes);
7418      __put_user(host_tx->offset, &target_tx->offset);
7419      __put_user(host_tx->freq, &target_tx->freq);
7420      __put_user(host_tx->maxerror, &target_tx->maxerror);
7421      __put_user(host_tx->esterror, &target_tx->esterror);
7422      __put_user(host_tx->status, &target_tx->status);
7423      __put_user(host_tx->constant, &target_tx->constant);
7424      __put_user(host_tx->precision, &target_tx->precision);
7425      __put_user(host_tx->tolerance, &target_tx->tolerance);
7426      __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7427      __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7428      __put_user(host_tx->tick, &target_tx->tick);
7429      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7430      __put_user(host_tx->jitter, &target_tx->jitter);
7431      __put_user(host_tx->shift, &target_tx->shift);
7432      __put_user(host_tx->stabil, &target_tx->stabil);
7433      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7434      __put_user(host_tx->calcnt, &target_tx->calcnt);
7435      __put_user(host_tx->errcnt, &target_tx->errcnt);
7436      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7437      __put_user(host_tx->tai, &target_tx->tai);
7438  
7439      unlock_user_struct(target_tx, target_addr, 1);
7440      return 0;
7441  }
7442  #endif
7443  
7444  
7445  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7446  static inline abi_long target_to_host_timex64(struct timex *host_tx,
7447                                                abi_long target_addr)
7448  {
7449      struct target__kernel_timex *target_tx;
7450  
7451      if (copy_from_user_timeval64(&host_tx->time, target_addr +
7452                                   offsetof(struct target__kernel_timex,
7453                                            time))) {
7454          return -TARGET_EFAULT;
7455      }
7456  
7457      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7458          return -TARGET_EFAULT;
7459      }
7460  
7461      __get_user(host_tx->modes, &target_tx->modes);
7462      __get_user(host_tx->offset, &target_tx->offset);
7463      __get_user(host_tx->freq, &target_tx->freq);
7464      __get_user(host_tx->maxerror, &target_tx->maxerror);
7465      __get_user(host_tx->esterror, &target_tx->esterror);
7466      __get_user(host_tx->status, &target_tx->status);
7467      __get_user(host_tx->constant, &target_tx->constant);
7468      __get_user(host_tx->precision, &target_tx->precision);
7469      __get_user(host_tx->tolerance, &target_tx->tolerance);
7470      __get_user(host_tx->tick, &target_tx->tick);
7471      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7472      __get_user(host_tx->jitter, &target_tx->jitter);
7473      __get_user(host_tx->shift, &target_tx->shift);
7474      __get_user(host_tx->stabil, &target_tx->stabil);
7475      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7476      __get_user(host_tx->calcnt, &target_tx->calcnt);
7477      __get_user(host_tx->errcnt, &target_tx->errcnt);
7478      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7479      __get_user(host_tx->tai, &target_tx->tai);
7480  
7481      unlock_user_struct(target_tx, target_addr, 0);
7482      return 0;
7483  }
7484  
7485  static inline abi_long host_to_target_timex64(abi_long target_addr,
7486                                                struct timex *host_tx)
7487  {
7488      struct target__kernel_timex *target_tx;
7489  
7490     if (copy_to_user_timeval64(target_addr +
7491                                offsetof(struct target__kernel_timex, time),
7492                                &host_tx->time)) {
7493          return -TARGET_EFAULT;
7494      }
7495  
7496      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7497          return -TARGET_EFAULT;
7498      }
7499  
7500      __put_user(host_tx->modes, &target_tx->modes);
7501      __put_user(host_tx->offset, &target_tx->offset);
7502      __put_user(host_tx->freq, &target_tx->freq);
7503      __put_user(host_tx->maxerror, &target_tx->maxerror);
7504      __put_user(host_tx->esterror, &target_tx->esterror);
7505      __put_user(host_tx->status, &target_tx->status);
7506      __put_user(host_tx->constant, &target_tx->constant);
7507      __put_user(host_tx->precision, &target_tx->precision);
7508      __put_user(host_tx->tolerance, &target_tx->tolerance);
7509      __put_user(host_tx->tick, &target_tx->tick);
7510      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7511      __put_user(host_tx->jitter, &target_tx->jitter);
7512      __put_user(host_tx->shift, &target_tx->shift);
7513      __put_user(host_tx->stabil, &target_tx->stabil);
7514      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7515      __put_user(host_tx->calcnt, &target_tx->calcnt);
7516      __put_user(host_tx->errcnt, &target_tx->errcnt);
7517      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7518      __put_user(host_tx->tai, &target_tx->tai);
7519  
7520      unlock_user_struct(target_tx, target_addr, 1);
7521      return 0;
7522  }
7523  #endif
7524  
7525  #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7526  #define sigev_notify_thread_id _sigev_un._tid
7527  #endif
7528  
7529  static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7530                                                 abi_ulong target_addr)
7531  {
7532      struct target_sigevent *target_sevp;
7533  
7534      if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7535          return -TARGET_EFAULT;
7536      }
7537  
7538      /* This union is awkward on 64 bit systems because it has a 32 bit
7539       * integer and a pointer in it; we follow the conversion approach
7540       * used for handling sigval types in signal.c so the guest should get
7541       * the correct value back even if we did a 64 bit byteswap and it's
7542       * using the 32 bit integer.
7543       */
7544      host_sevp->sigev_value.sival_ptr =
7545          (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7546      host_sevp->sigev_signo =
7547          target_to_host_signal(tswap32(target_sevp->sigev_signo));
7548      host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7549      host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7550  
7551      unlock_user_struct(target_sevp, target_addr, 1);
7552      return 0;
7553  }
7554  
7555  #if defined(TARGET_NR_mlockall)
7556  static inline int target_to_host_mlockall_arg(int arg)
7557  {
7558      int result = 0;
7559  
7560      if (arg & TARGET_MCL_CURRENT) {
7561          result |= MCL_CURRENT;
7562      }
7563      if (arg & TARGET_MCL_FUTURE) {
7564          result |= MCL_FUTURE;
7565      }
7566  #ifdef MCL_ONFAULT
7567      if (arg & TARGET_MCL_ONFAULT) {
7568          result |= MCL_ONFAULT;
7569      }
7570  #endif
7571  
7572      return result;
7573  }
7574  #endif
7575  
7576  static inline int target_to_host_msync_arg(abi_long arg)
7577  {
7578      return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7579             ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7580             ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7581             (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7582  }
7583  
7584  #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7585       defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7586       defined(TARGET_NR_newfstatat))
7587  static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7588                                               abi_ulong target_addr,
7589                                               struct stat *host_st)
7590  {
7591  #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7592      if (cpu_env->eabi) {
7593          struct target_eabi_stat64 *target_st;
7594  
7595          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7596              return -TARGET_EFAULT;
7597          memset(target_st, 0, sizeof(struct target_eabi_stat64));
7598          __put_user(host_st->st_dev, &target_st->st_dev);
7599          __put_user(host_st->st_ino, &target_st->st_ino);
7600  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7601          __put_user(host_st->st_ino, &target_st->__st_ino);
7602  #endif
7603          __put_user(host_st->st_mode, &target_st->st_mode);
7604          __put_user(host_st->st_nlink, &target_st->st_nlink);
7605          __put_user(host_st->st_uid, &target_st->st_uid);
7606          __put_user(host_st->st_gid, &target_st->st_gid);
7607          __put_user(host_st->st_rdev, &target_st->st_rdev);
7608          __put_user(host_st->st_size, &target_st->st_size);
7609          __put_user(host_st->st_blksize, &target_st->st_blksize);
7610          __put_user(host_st->st_blocks, &target_st->st_blocks);
7611          __put_user(host_st->st_atime, &target_st->target_st_atime);
7612          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7613          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7614  #ifdef HAVE_STRUCT_STAT_ST_ATIM
7615          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7616          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7617          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7618  #endif
7619          unlock_user_struct(target_st, target_addr, 1);
7620      } else
7621  #endif
7622      {
7623  #if defined(TARGET_HAS_STRUCT_STAT64)
7624          struct target_stat64 *target_st;
7625  #else
7626          struct target_stat *target_st;
7627  #endif
7628  
7629          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7630              return -TARGET_EFAULT;
7631          memset(target_st, 0, sizeof(*target_st));
7632          __put_user(host_st->st_dev, &target_st->st_dev);
7633          __put_user(host_st->st_ino, &target_st->st_ino);
7634  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7635          __put_user(host_st->st_ino, &target_st->__st_ino);
7636  #endif
7637          __put_user(host_st->st_mode, &target_st->st_mode);
7638          __put_user(host_st->st_nlink, &target_st->st_nlink);
7639          __put_user(host_st->st_uid, &target_st->st_uid);
7640          __put_user(host_st->st_gid, &target_st->st_gid);
7641          __put_user(host_st->st_rdev, &target_st->st_rdev);
7642          /* XXX: better use of kernel struct */
7643          __put_user(host_st->st_size, &target_st->st_size);
7644          __put_user(host_st->st_blksize, &target_st->st_blksize);
7645          __put_user(host_st->st_blocks, &target_st->st_blocks);
7646          __put_user(host_st->st_atime, &target_st->target_st_atime);
7647          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7648          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7649  #ifdef HAVE_STRUCT_STAT_ST_ATIM
7650          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7651          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7652          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7653  #endif
7654          unlock_user_struct(target_st, target_addr, 1);
7655      }
7656  
7657      return 0;
7658  }
7659  #endif
7660  
7661  #if defined(TARGET_NR_statx) && defined(__NR_statx)
7662  static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7663                                              abi_ulong target_addr)
7664  {
7665      struct target_statx *target_stx;
7666  
7667      if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7668          return -TARGET_EFAULT;
7669      }
7670      memset(target_stx, 0, sizeof(*target_stx));
7671  
7672      __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7673      __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7674      __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7675      __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7676      __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7677      __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7678      __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7679      __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7680      __put_user(host_stx->stx_size, &target_stx->stx_size);
7681      __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7682      __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7683      __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7684      __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7685      __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7686      __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7687      __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7688      __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7689      __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7690      __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7691      __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7692      __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7693      __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7694      __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7695  
7696      unlock_user_struct(target_stx, target_addr, 1);
7697  
7698      return 0;
7699  }
7700  #endif
7701  
7702  static int do_sys_futex(int *uaddr, int op, int val,
7703                           const struct timespec *timeout, int *uaddr2,
7704                           int val3)
7705  {
7706  #if HOST_LONG_BITS == 64
7707  #if defined(__NR_futex)
7708      /* always a 64-bit time_t, it doesn't define _time64 version  */
7709      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7710  
7711  #endif
7712  #else /* HOST_LONG_BITS == 64 */
7713  #if defined(__NR_futex_time64)
7714      if (sizeof(timeout->tv_sec) == 8) {
7715          /* _time64 function on 32bit arch */
7716          return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7717      }
7718  #endif
7719  #if defined(__NR_futex)
7720      /* old function on 32bit arch */
7721      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7722  #endif
7723  #endif /* HOST_LONG_BITS == 64 */
7724      g_assert_not_reached();
7725  }
7726  
7727  static int do_safe_futex(int *uaddr, int op, int val,
7728                           const struct timespec *timeout, int *uaddr2,
7729                           int val3)
7730  {
7731  #if HOST_LONG_BITS == 64
7732  #if defined(__NR_futex)
7733      /* always a 64-bit time_t, it doesn't define _time64 version  */
7734      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7735  #endif
7736  #else /* HOST_LONG_BITS == 64 */
7737  #if defined(__NR_futex_time64)
7738      if (sizeof(timeout->tv_sec) == 8) {
7739          /* _time64 function on 32bit arch */
7740          return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7741                                             val3));
7742      }
7743  #endif
7744  #if defined(__NR_futex)
7745      /* old function on 32bit arch */
7746      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7747  #endif
7748  #endif /* HOST_LONG_BITS == 64 */
7749      return -TARGET_ENOSYS;
7750  }
7751  
7752  /* ??? Using host futex calls even when target atomic operations
7753     are not really atomic probably breaks things.  However implementing
7754     futexes locally would make futexes shared between multiple processes
7755     tricky.  However they're probably useless because guest atomic
7756     operations won't work either.  */
7757  #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7758  static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7759                      int op, int val, target_ulong timeout,
7760                      target_ulong uaddr2, int val3)
7761  {
7762      struct timespec ts, *pts = NULL;
7763      void *haddr2 = NULL;
7764      int base_op;
7765  
7766      /* We assume FUTEX_* constants are the same on both host and target. */
7767  #ifdef FUTEX_CMD_MASK
7768      base_op = op & FUTEX_CMD_MASK;
7769  #else
7770      base_op = op;
7771  #endif
7772      switch (base_op) {
7773      case FUTEX_WAIT:
7774      case FUTEX_WAIT_BITSET:
7775          val = tswap32(val);
7776          break;
7777      case FUTEX_WAIT_REQUEUE_PI:
7778          val = tswap32(val);
7779          haddr2 = g2h(cpu, uaddr2);
7780          break;
7781      case FUTEX_LOCK_PI:
7782      case FUTEX_LOCK_PI2:
7783          break;
7784      case FUTEX_WAKE:
7785      case FUTEX_WAKE_BITSET:
7786      case FUTEX_TRYLOCK_PI:
7787      case FUTEX_UNLOCK_PI:
7788          timeout = 0;
7789          break;
7790      case FUTEX_FD:
7791          val = target_to_host_signal(val);
7792          timeout = 0;
7793          break;
7794      case FUTEX_CMP_REQUEUE:
7795      case FUTEX_CMP_REQUEUE_PI:
7796          val3 = tswap32(val3);
7797          /* fall through */
7798      case FUTEX_REQUEUE:
7799      case FUTEX_WAKE_OP:
7800          /*
7801           * For these, the 4th argument is not TIMEOUT, but VAL2.
7802           * But the prototype of do_safe_futex takes a pointer, so
7803           * insert casts to satisfy the compiler.  We do not need
7804           * to tswap VAL2 since it's not compared to guest memory.
7805            */
7806          pts = (struct timespec *)(uintptr_t)timeout;
7807          timeout = 0;
7808          haddr2 = g2h(cpu, uaddr2);
7809          break;
7810      default:
7811          return -TARGET_ENOSYS;
7812      }
7813      if (timeout) {
7814          pts = &ts;
7815          if (time64
7816              ? target_to_host_timespec64(pts, timeout)
7817              : target_to_host_timespec(pts, timeout)) {
7818              return -TARGET_EFAULT;
7819          }
7820      }
7821      return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7822  }
7823  #endif
7824  
7825  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7826  static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7827                                       abi_long handle, abi_long mount_id,
7828                                       abi_long flags)
7829  {
7830      struct file_handle *target_fh;
7831      struct file_handle *fh;
7832      int mid = 0;
7833      abi_long ret;
7834      char *name;
7835      unsigned int size, total_size;
7836  
7837      if (get_user_s32(size, handle)) {
7838          return -TARGET_EFAULT;
7839      }
7840  
7841      name = lock_user_string(pathname);
7842      if (!name) {
7843          return -TARGET_EFAULT;
7844      }
7845  
7846      total_size = sizeof(struct file_handle) + size;
7847      target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7848      if (!target_fh) {
7849          unlock_user(name, pathname, 0);
7850          return -TARGET_EFAULT;
7851      }
7852  
7853      fh = g_malloc0(total_size);
7854      fh->handle_bytes = size;
7855  
7856      ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7857      unlock_user(name, pathname, 0);
7858  
7859      /* man name_to_handle_at(2):
7860       * Other than the use of the handle_bytes field, the caller should treat
7861       * the file_handle structure as an opaque data type
7862       */
7863  
7864      memcpy(target_fh, fh, total_size);
7865      target_fh->handle_bytes = tswap32(fh->handle_bytes);
7866      target_fh->handle_type = tswap32(fh->handle_type);
7867      g_free(fh);
7868      unlock_user(target_fh, handle, total_size);
7869  
7870      if (put_user_s32(mid, mount_id)) {
7871          return -TARGET_EFAULT;
7872      }
7873  
7874      return ret;
7875  
7876  }
7877  #endif
7878  
7879  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7880  static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7881                                       abi_long flags)
7882  {
7883      struct file_handle *target_fh;
7884      struct file_handle *fh;
7885      unsigned int size, total_size;
7886      abi_long ret;
7887  
7888      if (get_user_s32(size, handle)) {
7889          return -TARGET_EFAULT;
7890      }
7891  
7892      total_size = sizeof(struct file_handle) + size;
7893      target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7894      if (!target_fh) {
7895          return -TARGET_EFAULT;
7896      }
7897  
7898      fh = g_memdup(target_fh, total_size);
7899      fh->handle_bytes = size;
7900      fh->handle_type = tswap32(target_fh->handle_type);
7901  
7902      ret = get_errno(open_by_handle_at(mount_fd, fh,
7903                      target_to_host_bitmask(flags, fcntl_flags_tbl)));
7904  
7905      g_free(fh);
7906  
7907      unlock_user(target_fh, handle, total_size);
7908  
7909      return ret;
7910  }
7911  #endif
7912  
7913  #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7914  
7915  static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7916  {
7917      int host_flags;
7918      target_sigset_t *target_mask;
7919      sigset_t host_mask;
7920      abi_long ret;
7921  
7922      if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7923          return -TARGET_EINVAL;
7924      }
7925      if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7926          return -TARGET_EFAULT;
7927      }
7928  
7929      target_to_host_sigset(&host_mask, target_mask);
7930  
7931      host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7932  
7933      ret = get_errno(signalfd(fd, &host_mask, host_flags));
7934      if (ret >= 0) {
7935          fd_trans_register(ret, &target_signalfd_trans);
7936      }
7937  
7938      unlock_user_struct(target_mask, mask, 0);
7939  
7940      return ret;
7941  }
7942  #endif
7943  
7944  /* Map host to target signal numbers for the wait family of syscalls.
7945     Assume all other status bits are the same.  */
7946  int host_to_target_waitstatus(int status)
7947  {
7948      if (WIFSIGNALED(status)) {
7949          return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7950      }
7951      if (WIFSTOPPED(status)) {
7952          return (host_to_target_signal(WSTOPSIG(status)) << 8)
7953                 | (status & 0xff);
7954      }
7955      return status;
7956  }
7957  
7958  static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7959  {
7960      CPUState *cpu = env_cpu(cpu_env);
7961      struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7962      int i;
7963  
7964      for (i = 0; i < bprm->argc; i++) {
7965          size_t len = strlen(bprm->argv[i]) + 1;
7966  
7967          if (write(fd, bprm->argv[i], len) != len) {
7968              return -1;
7969          }
7970      }
7971  
7972      return 0;
7973  }
7974  
7975  struct open_self_maps_data {
7976      TaskState *ts;
7977      IntervalTreeRoot *host_maps;
7978      int fd;
7979      bool smaps;
7980  };
7981  
7982  /*
7983   * Subroutine to output one line of /proc/self/maps,
7984   * or one region of /proc/self/smaps.
7985   */
7986  
7987  #ifdef TARGET_HPPA
7988  # define test_stack(S, E, L)  (E == L)
7989  #else
7990  # define test_stack(S, E, L)  (S == L)
7991  #endif
7992  
7993  static void open_self_maps_4(const struct open_self_maps_data *d,
7994                               const MapInfo *mi, abi_ptr start,
7995                               abi_ptr end, unsigned flags)
7996  {
7997      const struct image_info *info = d->ts->info;
7998      const char *path = mi->path;
7999      uint64_t offset;
8000      int fd = d->fd;
8001      int count;
8002  
8003      if (test_stack(start, end, info->stack_limit)) {
8004          path = "[stack]";
8005      } else if (start == info->brk) {
8006          path = "[heap]";
8007      } else if (start == info->vdso) {
8008          path = "[vdso]";
8009  #ifdef TARGET_X86_64
8010      } else if (start == TARGET_VSYSCALL_PAGE) {
8011          path = "[vsyscall]";
8012  #endif
8013      }
8014  
8015      /* Except null device (MAP_ANON), adjust offset for this fragment. */
8016      offset = mi->offset;
8017      if (mi->dev) {
8018          uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8019          offset += hstart - mi->itree.start;
8020      }
8021  
8022      count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8023                      " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8024                      start, end,
8025                      (flags & PAGE_READ) ? 'r' : '-',
8026                      (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8027                      (flags & PAGE_EXEC) ? 'x' : '-',
8028                      mi->is_priv ? 'p' : 's',
8029                      offset, major(mi->dev), minor(mi->dev),
8030                      (uint64_t)mi->inode);
8031      if (path) {
8032          dprintf(fd, "%*s%s\n", 73 - count, "", path);
8033      } else {
8034          dprintf(fd, "\n");
8035      }
8036  
8037      if (d->smaps) {
8038          unsigned long size = end - start;
8039          unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8040          unsigned long size_kb = size >> 10;
8041  
8042          dprintf(fd, "Size:                  %lu kB\n"
8043                  "KernelPageSize:        %lu kB\n"
8044                  "MMUPageSize:           %lu kB\n"
8045                  "Rss:                   0 kB\n"
8046                  "Pss:                   0 kB\n"
8047                  "Pss_Dirty:             0 kB\n"
8048                  "Shared_Clean:          0 kB\n"
8049                  "Shared_Dirty:          0 kB\n"
8050                  "Private_Clean:         0 kB\n"
8051                  "Private_Dirty:         0 kB\n"
8052                  "Referenced:            0 kB\n"
8053                  "Anonymous:             %lu kB\n"
8054                  "LazyFree:              0 kB\n"
8055                  "AnonHugePages:         0 kB\n"
8056                  "ShmemPmdMapped:        0 kB\n"
8057                  "FilePmdMapped:         0 kB\n"
8058                  "Shared_Hugetlb:        0 kB\n"
8059                  "Private_Hugetlb:       0 kB\n"
8060                  "Swap:                  0 kB\n"
8061                  "SwapPss:               0 kB\n"
8062                  "Locked:                0 kB\n"
8063                  "THPeligible:    0\n"
8064                  "VmFlags:%s%s%s%s%s%s%s%s\n",
8065                  size_kb, page_size_kb, page_size_kb,
8066                  (flags & PAGE_ANON ? size_kb : 0),
8067                  (flags & PAGE_READ) ? " rd" : "",
8068                  (flags & PAGE_WRITE_ORG) ? " wr" : "",
8069                  (flags & PAGE_EXEC) ? " ex" : "",
8070                  mi->is_priv ? "" : " sh",
8071                  (flags & PAGE_READ) ? " mr" : "",
8072                  (flags & PAGE_WRITE_ORG) ? " mw" : "",
8073                  (flags & PAGE_EXEC) ? " me" : "",
8074                  mi->is_priv ? "" : " ms");
8075      }
8076  }
8077  
8078  /*
8079   * Callback for walk_memory_regions, when read_self_maps() fails.
8080   * Proceed without the benefit of host /proc/self/maps cross-check.
8081   */
8082  static int open_self_maps_3(void *opaque, target_ulong guest_start,
8083                              target_ulong guest_end, unsigned long flags)
8084  {
8085      static const MapInfo mi = { .is_priv = true };
8086  
8087      open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8088      return 0;
8089  }
8090  
8091  /*
8092   * Callback for walk_memory_regions, when read_self_maps() succeeds.
8093   */
8094  static int open_self_maps_2(void *opaque, target_ulong guest_start,
8095                              target_ulong guest_end, unsigned long flags)
8096  {
8097      const struct open_self_maps_data *d = opaque;
8098      uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8099      uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8100  
8101  #ifdef TARGET_X86_64
8102      /*
8103       * Because of the extremely high position of the page within the guest
8104       * virtual address space, this is not backed by host memory at all.
8105       * Therefore the loop below would fail.  This is the only instance
8106       * of not having host backing memory.
8107       */
8108      if (guest_start == TARGET_VSYSCALL_PAGE) {
8109          return open_self_maps_3(opaque, guest_start, guest_end, flags);
8110      }
8111  #endif
8112  
8113      while (1) {
8114          IntervalTreeNode *n =
8115              interval_tree_iter_first(d->host_maps, host_start, host_start);
8116          MapInfo *mi = container_of(n, MapInfo, itree);
8117          uintptr_t this_hlast = MIN(host_last, n->last);
8118          target_ulong this_gend = h2g(this_hlast) + 1;
8119  
8120          open_self_maps_4(d, mi, guest_start, this_gend, flags);
8121  
8122          if (this_hlast == host_last) {
8123              return 0;
8124          }
8125          host_start = this_hlast + 1;
8126          guest_start = h2g(host_start);
8127      }
8128  }
8129  
8130  static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8131  {
8132      struct open_self_maps_data d = {
8133          .ts = env_cpu(env)->opaque,
8134          .host_maps = read_self_maps(),
8135          .fd = fd,
8136          .smaps = smaps
8137      };
8138  
8139      if (d.host_maps) {
8140          walk_memory_regions(&d, open_self_maps_2);
8141          free_self_maps(d.host_maps);
8142      } else {
8143          walk_memory_regions(&d, open_self_maps_3);
8144      }
8145      return 0;
8146  }
8147  
8148  static int open_self_maps(CPUArchState *cpu_env, int fd)
8149  {
8150      return open_self_maps_1(cpu_env, fd, false);
8151  }
8152  
8153  static int open_self_smaps(CPUArchState *cpu_env, int fd)
8154  {
8155      return open_self_maps_1(cpu_env, fd, true);
8156  }
8157  
8158  static int open_self_stat(CPUArchState *cpu_env, int fd)
8159  {
8160      CPUState *cpu = env_cpu(cpu_env);
8161      TaskState *ts = get_task_state(cpu);
8162      g_autoptr(GString) buf = g_string_new(NULL);
8163      int i;
8164  
8165      for (i = 0; i < 44; i++) {
8166          if (i == 0) {
8167              /* pid */
8168              g_string_printf(buf, FMT_pid " ", getpid());
8169          } else if (i == 1) {
8170              /* app name */
8171              gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8172              bin = bin ? bin + 1 : ts->bprm->argv[0];
8173              g_string_printf(buf, "(%.15s) ", bin);
8174          } else if (i == 2) {
8175              /* task state */
8176              g_string_assign(buf, "R "); /* we are running right now */
8177          } else if (i == 3) {
8178              /* ppid */
8179              g_string_printf(buf, FMT_pid " ", getppid());
8180          } else if (i == 21) {
8181              /* starttime */
8182              g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8183          } else if (i == 27) {
8184              /* stack bottom */
8185              g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8186          } else {
8187              /* for the rest, there is MasterCard */
8188              g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8189          }
8190  
8191          if (write(fd, buf->str, buf->len) != buf->len) {
8192              return -1;
8193          }
8194      }
8195  
8196      return 0;
8197  }
8198  
8199  static int open_self_auxv(CPUArchState *cpu_env, int fd)
8200  {
8201      CPUState *cpu = env_cpu(cpu_env);
8202      TaskState *ts = get_task_state(cpu);
8203      abi_ulong auxv = ts->info->saved_auxv;
8204      abi_ulong len = ts->info->auxv_len;
8205      char *ptr;
8206  
8207      /*
8208       * Auxiliary vector is stored in target process stack.
8209       * read in whole auxv vector and copy it to file
8210       */
8211      ptr = lock_user(VERIFY_READ, auxv, len, 0);
8212      if (ptr != NULL) {
8213          while (len > 0) {
8214              ssize_t r;
8215              r = write(fd, ptr, len);
8216              if (r <= 0) {
8217                  break;
8218              }
8219              len -= r;
8220              ptr += r;
8221          }
8222          lseek(fd, 0, SEEK_SET);
8223          unlock_user(ptr, auxv, len);
8224      }
8225  
8226      return 0;
8227  }
8228  
8229  static int is_proc_myself(const char *filename, const char *entry)
8230  {
8231      if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8232          filename += strlen("/proc/");
8233          if (!strncmp(filename, "self/", strlen("self/"))) {
8234              filename += strlen("self/");
8235          } else if (*filename >= '1' && *filename <= '9') {
8236              char myself[80];
8237              snprintf(myself, sizeof(myself), "%d/", getpid());
8238              if (!strncmp(filename, myself, strlen(myself))) {
8239                  filename += strlen(myself);
8240              } else {
8241                  return 0;
8242              }
8243          } else {
8244              return 0;
8245          }
8246          if (!strcmp(filename, entry)) {
8247              return 1;
8248          }
8249      }
8250      return 0;
8251  }
8252  
8253  static void excp_dump_file(FILE *logfile, CPUArchState *env,
8254                        const char *fmt, int code)
8255  {
8256      if (logfile) {
8257          CPUState *cs = env_cpu(env);
8258  
8259          fprintf(logfile, fmt, code);
8260          fprintf(logfile, "Failing executable: %s\n", exec_path);
8261          cpu_dump_state(cs, logfile, 0);
8262          open_self_maps(env, fileno(logfile));
8263      }
8264  }
8265  
8266  void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8267  {
8268      /* dump to console */
8269      excp_dump_file(stderr, env, fmt, code);
8270  
8271      /* dump to log file */
8272      if (qemu_log_separate()) {
8273          FILE *logfile = qemu_log_trylock();
8274  
8275          excp_dump_file(logfile, env, fmt, code);
8276          qemu_log_unlock(logfile);
8277      }
8278  }
8279  
8280  #include "target_proc.h"
8281  
8282  #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8283      defined(HAVE_ARCH_PROC_CPUINFO) || \
8284      defined(HAVE_ARCH_PROC_HARDWARE)
8285  static int is_proc(const char *filename, const char *entry)
8286  {
8287      return strcmp(filename, entry) == 0;
8288  }
8289  #endif
8290  
8291  #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8292  static int open_net_route(CPUArchState *cpu_env, int fd)
8293  {
8294      FILE *fp;
8295      char *line = NULL;
8296      size_t len = 0;
8297      ssize_t read;
8298  
8299      fp = fopen("/proc/net/route", "r");
8300      if (fp == NULL) {
8301          return -1;
8302      }
8303  
8304      /* read header */
8305  
8306      read = getline(&line, &len, fp);
8307      dprintf(fd, "%s", line);
8308  
8309      /* read routes */
8310  
8311      while ((read = getline(&line, &len, fp)) != -1) {
8312          char iface[16];
8313          uint32_t dest, gw, mask;
8314          unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8315          int fields;
8316  
8317          fields = sscanf(line,
8318                          "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8319                          iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8320                          &mask, &mtu, &window, &irtt);
8321          if (fields != 11) {
8322              continue;
8323          }
8324          dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8325                  iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8326                  metric, tswap32(mask), mtu, window, irtt);
8327      }
8328  
8329      free(line);
8330      fclose(fp);
8331  
8332      return 0;
8333  }
8334  #endif
8335  
8336  int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
8337                      int flags, mode_t mode, bool safe)
8338  {
8339      g_autofree char *proc_name = NULL;
8340      const char *pathname;
8341      struct fake_open {
8342          const char *filename;
8343          int (*fill)(CPUArchState *cpu_env, int fd);
8344          int (*cmp)(const char *s1, const char *s2);
8345      };
8346      const struct fake_open *fake_open;
8347      static const struct fake_open fakes[] = {
8348          { "maps", open_self_maps, is_proc_myself },
8349          { "smaps", open_self_smaps, is_proc_myself },
8350          { "stat", open_self_stat, is_proc_myself },
8351          { "auxv", open_self_auxv, is_proc_myself },
8352          { "cmdline", open_self_cmdline, is_proc_myself },
8353  #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8354          { "/proc/net/route", open_net_route, is_proc },
8355  #endif
8356  #if defined(HAVE_ARCH_PROC_CPUINFO)
8357          { "/proc/cpuinfo", open_cpuinfo, is_proc },
8358  #endif
8359  #if defined(HAVE_ARCH_PROC_HARDWARE)
8360          { "/proc/hardware", open_hardware, is_proc },
8361  #endif
8362          { NULL, NULL, NULL }
8363      };
8364  
8365      /* if this is a file from /proc/ filesystem, expand full name */
8366      proc_name = realpath(fname, NULL);
8367      if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8368          pathname = proc_name;
8369      } else {
8370          pathname = fname;
8371      }
8372  
8373      if (is_proc_myself(pathname, "exe")) {
8374          if (safe) {
8375              return safe_openat(dirfd, exec_path, flags, mode);
8376          } else {
8377              return openat(dirfd, exec_path, flags, mode);
8378          }
8379      }
8380  
8381      for (fake_open = fakes; fake_open->filename; fake_open++) {
8382          if (fake_open->cmp(pathname, fake_open->filename)) {
8383              break;
8384          }
8385      }
8386  
8387      if (fake_open->filename) {
8388          const char *tmpdir;
8389          char filename[PATH_MAX];
8390          int fd, r;
8391  
8392          fd = memfd_create("qemu-open", 0);
8393          if (fd < 0) {
8394              if (errno != ENOSYS) {
8395                  return fd;
8396              }
8397              /* create temporary file to map stat to */
8398              tmpdir = getenv("TMPDIR");
8399              if (!tmpdir)
8400                  tmpdir = "/tmp";
8401              snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8402              fd = mkstemp(filename);
8403              if (fd < 0) {
8404                  return fd;
8405              }
8406              unlink(filename);
8407          }
8408  
8409          if ((r = fake_open->fill(cpu_env, fd))) {
8410              int e = errno;
8411              close(fd);
8412              errno = e;
8413              return r;
8414          }
8415          lseek(fd, 0, SEEK_SET);
8416  
8417          return fd;
8418      }
8419  
8420      if (safe) {
8421          return safe_openat(dirfd, path(pathname), flags, mode);
8422      } else {
8423          return openat(dirfd, path(pathname), flags, mode);
8424      }
8425  }
8426  
8427  ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8428  {
8429      ssize_t ret;
8430  
8431      if (!pathname || !buf) {
8432          errno = EFAULT;
8433          return -1;
8434      }
8435  
8436      if (!bufsiz) {
8437          /* Short circuit this for the magic exe check. */
8438          errno = EINVAL;
8439          return -1;
8440      }
8441  
8442      if (is_proc_myself((const char *)pathname, "exe")) {
8443          /*
8444           * Don't worry about sign mismatch as earlier mapping
8445           * logic would have thrown a bad address error.
8446           */
8447          ret = MIN(strlen(exec_path), bufsiz);
8448          /* We cannot NUL terminate the string. */
8449          memcpy(buf, exec_path, ret);
8450      } else {
8451          ret = readlink(path(pathname), buf, bufsiz);
8452      }
8453  
8454      return ret;
8455  }
8456  
8457  static int do_execv(CPUArchState *cpu_env, int dirfd,
8458                      abi_long pathname, abi_long guest_argp,
8459                      abi_long guest_envp, int flags, bool is_execveat)
8460  {
8461      int ret;
8462      char **argp, **envp;
8463      int argc, envc;
8464      abi_ulong gp;
8465      abi_ulong addr;
8466      char **q;
8467      void *p;
8468  
8469      argc = 0;
8470  
8471      for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8472          if (get_user_ual(addr, gp)) {
8473              return -TARGET_EFAULT;
8474          }
8475          if (!addr) {
8476              break;
8477          }
8478          argc++;
8479      }
8480      envc = 0;
8481      for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8482          if (get_user_ual(addr, gp)) {
8483              return -TARGET_EFAULT;
8484          }
8485          if (!addr) {
8486              break;
8487          }
8488          envc++;
8489      }
8490  
8491      argp = g_new0(char *, argc + 1);
8492      envp = g_new0(char *, envc + 1);
8493  
8494      for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8495          if (get_user_ual(addr, gp)) {
8496              goto execve_efault;
8497          }
8498          if (!addr) {
8499              break;
8500          }
8501          *q = lock_user_string(addr);
8502          if (!*q) {
8503              goto execve_efault;
8504          }
8505      }
8506      *q = NULL;
8507  
8508      for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8509          if (get_user_ual(addr, gp)) {
8510              goto execve_efault;
8511          }
8512          if (!addr) {
8513              break;
8514          }
8515          *q = lock_user_string(addr);
8516          if (!*q) {
8517              goto execve_efault;
8518          }
8519      }
8520      *q = NULL;
8521  
8522      /*
8523       * Although execve() is not an interruptible syscall it is
8524       * a special case where we must use the safe_syscall wrapper:
8525       * if we allow a signal to happen before we make the host
8526       * syscall then we will 'lose' it, because at the point of
8527       * execve the process leaves QEMU's control. So we use the
8528       * safe syscall wrapper to ensure that we either take the
8529       * signal as a guest signal, or else it does not happen
8530       * before the execve completes and makes it the other
8531       * program's problem.
8532       */
8533      p = lock_user_string(pathname);
8534      if (!p) {
8535          goto execve_efault;
8536      }
8537  
8538      const char *exe = p;
8539      if (is_proc_myself(p, "exe")) {
8540          exe = exec_path;
8541      }
8542      ret = is_execveat
8543          ? safe_execveat(dirfd, exe, argp, envp, flags)
8544          : safe_execve(exe, argp, envp);
8545      ret = get_errno(ret);
8546  
8547      unlock_user(p, pathname, 0);
8548  
8549      goto execve_end;
8550  
8551  execve_efault:
8552      ret = -TARGET_EFAULT;
8553  
8554  execve_end:
8555      for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8556          if (get_user_ual(addr, gp) || !addr) {
8557              break;
8558          }
8559          unlock_user(*q, addr, 0);
8560      }
8561      for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8562          if (get_user_ual(addr, gp) || !addr) {
8563              break;
8564          }
8565          unlock_user(*q, addr, 0);
8566      }
8567  
8568      g_free(argp);
8569      g_free(envp);
8570      return ret;
8571  }
8572  
8573  #define TIMER_MAGIC 0x0caf0000
8574  #define TIMER_MAGIC_MASK 0xffff0000
8575  
8576  /* Convert QEMU provided timer ID back to internal 16bit index format */
8577  static target_timer_t get_timer_id(abi_long arg)
8578  {
8579      target_timer_t timerid = arg;
8580  
8581      if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8582          return -TARGET_EINVAL;
8583      }
8584  
8585      timerid &= 0xffff;
8586  
8587      if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8588          return -TARGET_EINVAL;
8589      }
8590  
8591      return timerid;
8592  }
8593  
8594  static int target_to_host_cpu_mask(unsigned long *host_mask,
8595                                     size_t host_size,
8596                                     abi_ulong target_addr,
8597                                     size_t target_size)
8598  {
8599      unsigned target_bits = sizeof(abi_ulong) * 8;
8600      unsigned host_bits = sizeof(*host_mask) * 8;
8601      abi_ulong *target_mask;
8602      unsigned i, j;
8603  
8604      assert(host_size >= target_size);
8605  
8606      target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8607      if (!target_mask) {
8608          return -TARGET_EFAULT;
8609      }
8610      memset(host_mask, 0, host_size);
8611  
8612      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8613          unsigned bit = i * target_bits;
8614          abi_ulong val;
8615  
8616          __get_user(val, &target_mask[i]);
8617          for (j = 0; j < target_bits; j++, bit++) {
8618              if (val & (1UL << j)) {
8619                  host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8620              }
8621          }
8622      }
8623  
8624      unlock_user(target_mask, target_addr, 0);
8625      return 0;
8626  }
8627  
8628  static int host_to_target_cpu_mask(const unsigned long *host_mask,
8629                                     size_t host_size,
8630                                     abi_ulong target_addr,
8631                                     size_t target_size)
8632  {
8633      unsigned target_bits = sizeof(abi_ulong) * 8;
8634      unsigned host_bits = sizeof(*host_mask) * 8;
8635      abi_ulong *target_mask;
8636      unsigned i, j;
8637  
8638      assert(host_size >= target_size);
8639  
8640      target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8641      if (!target_mask) {
8642          return -TARGET_EFAULT;
8643      }
8644  
8645      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8646          unsigned bit = i * target_bits;
8647          abi_ulong val = 0;
8648  
8649          for (j = 0; j < target_bits; j++, bit++) {
8650              if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8651                  val |= 1UL << j;
8652              }
8653          }
8654          __put_user(val, &target_mask[i]);
8655      }
8656  
8657      unlock_user(target_mask, target_addr, target_size);
8658      return 0;
8659  }
8660  
8661  #ifdef TARGET_NR_getdents
8662  static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8663  {
8664      g_autofree void *hdirp = NULL;
8665      void *tdirp;
8666      int hlen, hoff, toff;
8667      int hreclen, treclen;
8668      off64_t prev_diroff = 0;
8669  
8670      hdirp = g_try_malloc(count);
8671      if (!hdirp) {
8672          return -TARGET_ENOMEM;
8673      }
8674  
8675  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8676      hlen = sys_getdents(dirfd, hdirp, count);
8677  #else
8678      hlen = sys_getdents64(dirfd, hdirp, count);
8679  #endif
8680  
8681      hlen = get_errno(hlen);
8682      if (is_error(hlen)) {
8683          return hlen;
8684      }
8685  
8686      tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8687      if (!tdirp) {
8688          return -TARGET_EFAULT;
8689      }
8690  
8691      for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8692  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8693          struct linux_dirent *hde = hdirp + hoff;
8694  #else
8695          struct linux_dirent64 *hde = hdirp + hoff;
8696  #endif
8697          struct target_dirent *tde = tdirp + toff;
8698          int namelen;
8699          uint8_t type;
8700  
8701          namelen = strlen(hde->d_name);
8702          hreclen = hde->d_reclen;
8703          treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8704          treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8705  
8706          if (toff + treclen > count) {
8707              /*
8708               * If the host struct is smaller than the target struct, or
8709               * requires less alignment and thus packs into less space,
8710               * then the host can return more entries than we can pass
8711               * on to the guest.
8712               */
8713              if (toff == 0) {
8714                  toff = -TARGET_EINVAL; /* result buffer is too small */
8715                  break;
8716              }
8717              /*
8718               * Return what we have, resetting the file pointer to the
8719               * location of the first record not returned.
8720               */
8721              lseek64(dirfd, prev_diroff, SEEK_SET);
8722              break;
8723          }
8724  
8725          prev_diroff = hde->d_off;
8726          tde->d_ino = tswapal(hde->d_ino);
8727          tde->d_off = tswapal(hde->d_off);
8728          tde->d_reclen = tswap16(treclen);
8729          memcpy(tde->d_name, hde->d_name, namelen + 1);
8730  
8731          /*
8732           * The getdents type is in what was formerly a padding byte at the
8733           * end of the structure.
8734           */
8735  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8736          type = *((uint8_t *)hde + hreclen - 1);
8737  #else
8738          type = hde->d_type;
8739  #endif
8740          *((uint8_t *)tde + treclen - 1) = type;
8741      }
8742  
8743      unlock_user(tdirp, arg2, toff);
8744      return toff;
8745  }
8746  #endif /* TARGET_NR_getdents */
8747  
8748  #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8749  static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8750  {
8751      g_autofree void *hdirp = NULL;
8752      void *tdirp;
8753      int hlen, hoff, toff;
8754      int hreclen, treclen;
8755      off64_t prev_diroff = 0;
8756  
8757      hdirp = g_try_malloc(count);
8758      if (!hdirp) {
8759          return -TARGET_ENOMEM;
8760      }
8761  
8762      hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8763      if (is_error(hlen)) {
8764          return hlen;
8765      }
8766  
8767      tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8768      if (!tdirp) {
8769          return -TARGET_EFAULT;
8770      }
8771  
8772      for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8773          struct linux_dirent64 *hde = hdirp + hoff;
8774          struct target_dirent64 *tde = tdirp + toff;
8775          int namelen;
8776  
8777          namelen = strlen(hde->d_name) + 1;
8778          hreclen = hde->d_reclen;
8779          treclen = offsetof(struct target_dirent64, d_name) + namelen;
8780          treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8781  
8782          if (toff + treclen > count) {
8783              /*
8784               * If the host struct is smaller than the target struct, or
8785               * requires less alignment and thus packs into less space,
8786               * then the host can return more entries than we can pass
8787               * on to the guest.
8788               */
8789              if (toff == 0) {
8790                  toff = -TARGET_EINVAL; /* result buffer is too small */
8791                  break;
8792              }
8793              /*
8794               * Return what we have, resetting the file pointer to the
8795               * location of the first record not returned.
8796               */
8797              lseek64(dirfd, prev_diroff, SEEK_SET);
8798              break;
8799          }
8800  
8801          prev_diroff = hde->d_off;
8802          tde->d_ino = tswap64(hde->d_ino);
8803          tde->d_off = tswap64(hde->d_off);
8804          tde->d_reclen = tswap16(treclen);
8805          tde->d_type = hde->d_type;
8806          memcpy(tde->d_name, hde->d_name, namelen);
8807      }
8808  
8809      unlock_user(tdirp, arg2, toff);
8810      return toff;
8811  }
8812  #endif /* TARGET_NR_getdents64 */
8813  
8814  #if defined(TARGET_NR_riscv_hwprobe)
8815  
8816  #define RISCV_HWPROBE_KEY_MVENDORID     0
8817  #define RISCV_HWPROBE_KEY_MARCHID       1
8818  #define RISCV_HWPROBE_KEY_MIMPID        2
8819  
8820  #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8821  #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8822  
8823  #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8824  #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8825  #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8826  #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8827  #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8828  #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8829  #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8830  #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8831  #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8832  #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8833  #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8834  #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8835  #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8836  #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8837  #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8838  #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8839  #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8840  #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8841  #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8842  #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8843  #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8844  #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8845  #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8846  #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8847  #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8848  #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8849  #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8850  #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8851  #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8852  #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8853  #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8854  #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8855  #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
8856  #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8857  #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8858  #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8859  #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8860  
8861  #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8862  #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8863  #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8864  #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8865  #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8866  #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8867  #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8868  
8869  #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8870  
8871  struct riscv_hwprobe {
8872      abi_llong  key;
8873      abi_ullong value;
8874  };
8875  
8876  static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8877                                      struct riscv_hwprobe *pair,
8878                                      size_t pair_count)
8879  {
8880      const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8881  
8882      for (; pair_count > 0; pair_count--, pair++) {
8883          abi_llong key;
8884          abi_ullong value;
8885          __put_user(0, &pair->value);
8886          __get_user(key, &pair->key);
8887          switch (key) {
8888          case RISCV_HWPROBE_KEY_MVENDORID:
8889              __put_user(cfg->mvendorid, &pair->value);
8890              break;
8891          case RISCV_HWPROBE_KEY_MARCHID:
8892              __put_user(cfg->marchid, &pair->value);
8893              break;
8894          case RISCV_HWPROBE_KEY_MIMPID:
8895              __put_user(cfg->mimpid, &pair->value);
8896              break;
8897          case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
8898              value = riscv_has_ext(env, RVI) &&
8899                      riscv_has_ext(env, RVM) &&
8900                      riscv_has_ext(env, RVA) ?
8901                      RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
8902              __put_user(value, &pair->value);
8903              break;
8904          case RISCV_HWPROBE_KEY_IMA_EXT_0:
8905              value = riscv_has_ext(env, RVF) &&
8906                      riscv_has_ext(env, RVD) ?
8907                      RISCV_HWPROBE_IMA_FD : 0;
8908              value |= riscv_has_ext(env, RVC) ?
8909                       RISCV_HWPROBE_IMA_C : 0;
8910              value |= riscv_has_ext(env, RVV) ?
8911                       RISCV_HWPROBE_IMA_V : 0;
8912              value |= cfg->ext_zba ?
8913                       RISCV_HWPROBE_EXT_ZBA : 0;
8914              value |= cfg->ext_zbb ?
8915                       RISCV_HWPROBE_EXT_ZBB : 0;
8916              value |= cfg->ext_zbs ?
8917                       RISCV_HWPROBE_EXT_ZBS : 0;
8918              value |= cfg->ext_zicboz ?
8919                       RISCV_HWPROBE_EXT_ZICBOZ : 0;
8920              value |= cfg->ext_zbc ?
8921                       RISCV_HWPROBE_EXT_ZBC : 0;
8922              value |= cfg->ext_zbkb ?
8923                       RISCV_HWPROBE_EXT_ZBKB : 0;
8924              value |= cfg->ext_zbkc ?
8925                       RISCV_HWPROBE_EXT_ZBKC : 0;
8926              value |= cfg->ext_zbkx ?
8927                       RISCV_HWPROBE_EXT_ZBKX : 0;
8928              value |= cfg->ext_zknd ?
8929                       RISCV_HWPROBE_EXT_ZKND : 0;
8930              value |= cfg->ext_zkne ?
8931                       RISCV_HWPROBE_EXT_ZKNE : 0;
8932              value |= cfg->ext_zknh ?
8933                       RISCV_HWPROBE_EXT_ZKNH : 0;
8934              value |= cfg->ext_zksed ?
8935                       RISCV_HWPROBE_EXT_ZKSED : 0;
8936              value |= cfg->ext_zksh ?
8937                       RISCV_HWPROBE_EXT_ZKSH : 0;
8938              value |= cfg->ext_zkt ?
8939                       RISCV_HWPROBE_EXT_ZKT : 0;
8940              value |= cfg->ext_zvbb ?
8941                       RISCV_HWPROBE_EXT_ZVBB : 0;
8942              value |= cfg->ext_zvbc ?
8943                       RISCV_HWPROBE_EXT_ZVBC : 0;
8944              value |= cfg->ext_zvkb ?
8945                       RISCV_HWPROBE_EXT_ZVKB : 0;
8946              value |= cfg->ext_zvkg ?
8947                       RISCV_HWPROBE_EXT_ZVKG : 0;
8948              value |= cfg->ext_zvkned ?
8949                       RISCV_HWPROBE_EXT_ZVKNED : 0;
8950              value |= cfg->ext_zvknha ?
8951                       RISCV_HWPROBE_EXT_ZVKNHA : 0;
8952              value |= cfg->ext_zvknhb ?
8953                       RISCV_HWPROBE_EXT_ZVKNHB : 0;
8954              value |= cfg->ext_zvksed ?
8955                       RISCV_HWPROBE_EXT_ZVKSED : 0;
8956              value |= cfg->ext_zvksh ?
8957                       RISCV_HWPROBE_EXT_ZVKSH : 0;
8958              value |= cfg->ext_zvkt ?
8959                       RISCV_HWPROBE_EXT_ZVKT : 0;
8960              value |= cfg->ext_zfh ?
8961                       RISCV_HWPROBE_EXT_ZFH : 0;
8962              value |= cfg->ext_zfhmin ?
8963                       RISCV_HWPROBE_EXT_ZFHMIN : 0;
8964              value |= cfg->ext_zihintntl ?
8965                       RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
8966              value |= cfg->ext_zvfh ?
8967                       RISCV_HWPROBE_EXT_ZVFH : 0;
8968              value |= cfg->ext_zvfhmin ?
8969                       RISCV_HWPROBE_EXT_ZVFHMIN : 0;
8970              value |= cfg->ext_zfa ?
8971                       RISCV_HWPROBE_EXT_ZFA : 0;
8972              value |= cfg->ext_ztso ?
8973                       RISCV_HWPROBE_EXT_ZTSO : 0;
8974              value |= cfg->ext_zacas ?
8975                       RISCV_HWPROBE_EXT_ZACAS : 0;
8976              value |= cfg->ext_zicond ?
8977                       RISCV_HWPROBE_EXT_ZICOND : 0;
8978              __put_user(value, &pair->value);
8979              break;
8980          case RISCV_HWPROBE_KEY_CPUPERF_0:
8981              __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
8982              break;
8983          case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
8984              value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
8985              __put_user(value, &pair->value);
8986              break;
8987          default:
8988              __put_user(-1, &pair->key);
8989              break;
8990          }
8991      }
8992  }
8993  
8994  static int cpu_set_valid(abi_long arg3, abi_long arg4)
8995  {
8996      int ret, i, tmp;
8997      size_t host_mask_size, target_mask_size;
8998      unsigned long *host_mask;
8999  
9000      /*
9001       * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9002       * arg3 contains the cpu count.
9003       */
9004      tmp = (8 * sizeof(abi_ulong));
9005      target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9006      host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9007                       ~(sizeof(*host_mask) - 1);
9008  
9009      host_mask = alloca(host_mask_size);
9010  
9011      ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9012                                    arg4, target_mask_size);
9013      if (ret != 0) {
9014          return ret;
9015      }
9016  
9017      for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9018          if (host_mask[i] != 0) {
9019              return 0;
9020          }
9021      }
9022      return -TARGET_EINVAL;
9023  }
9024  
9025  static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9026                                   abi_long arg2, abi_long arg3,
9027                                   abi_long arg4, abi_long arg5)
9028  {
9029      int ret;
9030      struct riscv_hwprobe *host_pairs;
9031  
9032      /* flags must be 0 */
9033      if (arg5 != 0) {
9034          return -TARGET_EINVAL;
9035      }
9036  
9037      /* check cpu_set */
9038      if (arg3 != 0) {
9039          ret = cpu_set_valid(arg3, arg4);
9040          if (ret != 0) {
9041              return ret;
9042          }
9043      } else if (arg4 != 0) {
9044          return -TARGET_EINVAL;
9045      }
9046  
9047      /* no pairs */
9048      if (arg2 == 0) {
9049          return 0;
9050      }
9051  
9052      host_pairs = lock_user(VERIFY_WRITE, arg1,
9053                             sizeof(*host_pairs) * (size_t)arg2, 0);
9054      if (host_pairs == NULL) {
9055          return -TARGET_EFAULT;
9056      }
9057      risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9058      unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9059      return 0;
9060  }
9061  #endif /* TARGET_NR_riscv_hwprobe */
9062  
9063  #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9064  _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9065  #endif
9066  
9067  #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9068  #define __NR_sys_open_tree __NR_open_tree
9069  _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9070            unsigned int, __flags)
9071  #endif
9072  
9073  #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9074  #define __NR_sys_move_mount __NR_move_mount
9075  _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9076             int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9077  #endif
9078  
9079  /* This is an internal helper for do_syscall so that it is easier
9080   * to have a single return point, so that actions, such as logging
9081   * of syscall results, can be performed.
9082   * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9083   */
9084  static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9085                              abi_long arg2, abi_long arg3, abi_long arg4,
9086                              abi_long arg5, abi_long arg6, abi_long arg7,
9087                              abi_long arg8)
9088  {
9089      CPUState *cpu = env_cpu(cpu_env);
9090      abi_long ret;
9091  #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9092      || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9093      || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9094      || defined(TARGET_NR_statx)
9095      struct stat st;
9096  #endif
9097  #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9098      || defined(TARGET_NR_fstatfs)
9099      struct statfs stfs;
9100  #endif
9101      void *p;
9102  
9103      switch(num) {
9104      case TARGET_NR_exit:
9105          /* In old applications this may be used to implement _exit(2).
9106             However in threaded applications it is used for thread termination,
9107             and _exit_group is used for application termination.
9108             Do thread termination if we have more then one thread.  */
9109  
9110          if (block_signals()) {
9111              return -QEMU_ERESTARTSYS;
9112          }
9113  
9114          pthread_mutex_lock(&clone_lock);
9115  
9116          if (CPU_NEXT(first_cpu)) {
9117              TaskState *ts = get_task_state(cpu);
9118  
9119              if (ts->child_tidptr) {
9120                  put_user_u32(0, ts->child_tidptr);
9121                  do_sys_futex(g2h(cpu, ts->child_tidptr),
9122                               FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9123              }
9124  
9125              object_unparent(OBJECT(cpu));
9126              object_unref(OBJECT(cpu));
9127              /*
9128               * At this point the CPU should be unrealized and removed
9129               * from cpu lists. We can clean-up the rest of the thread
9130               * data without the lock held.
9131               */
9132  
9133              pthread_mutex_unlock(&clone_lock);
9134  
9135              thread_cpu = NULL;
9136              g_free(ts);
9137              rcu_unregister_thread();
9138              pthread_exit(NULL);
9139          }
9140  
9141          pthread_mutex_unlock(&clone_lock);
9142          preexit_cleanup(cpu_env, arg1);
9143          _exit(arg1);
9144          return 0; /* avoid warning */
9145      case TARGET_NR_read:
9146          if (arg2 == 0 && arg3 == 0) {
9147              return get_errno(safe_read(arg1, 0, 0));
9148          } else {
9149              if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9150                  return -TARGET_EFAULT;
9151              ret = get_errno(safe_read(arg1, p, arg3));
9152              if (ret >= 0 &&
9153                  fd_trans_host_to_target_data(arg1)) {
9154                  ret = fd_trans_host_to_target_data(arg1)(p, ret);
9155              }
9156              unlock_user(p, arg2, ret);
9157          }
9158          return ret;
9159      case TARGET_NR_write:
9160          if (arg2 == 0 && arg3 == 0) {
9161              return get_errno(safe_write(arg1, 0, 0));
9162          }
9163          if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9164              return -TARGET_EFAULT;
9165          if (fd_trans_target_to_host_data(arg1)) {
9166              void *copy = g_malloc(arg3);
9167              memcpy(copy, p, arg3);
9168              ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9169              if (ret >= 0) {
9170                  ret = get_errno(safe_write(arg1, copy, ret));
9171              }
9172              g_free(copy);
9173          } else {
9174              ret = get_errno(safe_write(arg1, p, arg3));
9175          }
9176          unlock_user(p, arg2, 0);
9177          return ret;
9178  
9179  #ifdef TARGET_NR_open
9180      case TARGET_NR_open:
9181          if (!(p = lock_user_string(arg1)))
9182              return -TARGET_EFAULT;
9183          ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9184                                    target_to_host_bitmask(arg2, fcntl_flags_tbl),
9185                                    arg3, true));
9186          fd_trans_unregister(ret);
9187          unlock_user(p, arg1, 0);
9188          return ret;
9189  #endif
9190      case TARGET_NR_openat:
9191          if (!(p = lock_user_string(arg2)))
9192              return -TARGET_EFAULT;
9193          ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9194                                    target_to_host_bitmask(arg3, fcntl_flags_tbl),
9195                                    arg4, true));
9196          fd_trans_unregister(ret);
9197          unlock_user(p, arg2, 0);
9198          return ret;
9199  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9200      case TARGET_NR_name_to_handle_at:
9201          ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9202          return ret;
9203  #endif
9204  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9205      case TARGET_NR_open_by_handle_at:
9206          ret = do_open_by_handle_at(arg1, arg2, arg3);
9207          fd_trans_unregister(ret);
9208          return ret;
9209  #endif
9210  #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9211      case TARGET_NR_pidfd_open:
9212          return get_errno(pidfd_open(arg1, arg2));
9213  #endif
9214  #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9215      case TARGET_NR_pidfd_send_signal:
9216          {
9217              siginfo_t uinfo, *puinfo;
9218  
9219              if (arg3) {
9220                  p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9221                  if (!p) {
9222                      return -TARGET_EFAULT;
9223                   }
9224                   target_to_host_siginfo(&uinfo, p);
9225                   unlock_user(p, arg3, 0);
9226                   puinfo = &uinfo;
9227              } else {
9228                   puinfo = NULL;
9229              }
9230              ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9231                                                puinfo, arg4));
9232          }
9233          return ret;
9234  #endif
9235  #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9236      case TARGET_NR_pidfd_getfd:
9237          return get_errno(pidfd_getfd(arg1, arg2, arg3));
9238  #endif
9239      case TARGET_NR_close:
9240          fd_trans_unregister(arg1);
9241          return get_errno(close(arg1));
9242  #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9243      case TARGET_NR_close_range:
9244          ret = get_errno(sys_close_range(arg1, arg2, arg3));
9245          if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9246              abi_long fd, maxfd;
9247              maxfd = MIN(arg2, target_fd_max);
9248              for (fd = arg1; fd < maxfd; fd++) {
9249                  fd_trans_unregister(fd);
9250              }
9251          }
9252          return ret;
9253  #endif
9254  
9255      case TARGET_NR_brk:
9256          return do_brk(arg1);
9257  #ifdef TARGET_NR_fork
9258      case TARGET_NR_fork:
9259          return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9260  #endif
9261  #ifdef TARGET_NR_waitpid
9262      case TARGET_NR_waitpid:
9263          {
9264              int status;
9265              ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9266              if (!is_error(ret) && arg2 && ret
9267                  && put_user_s32(host_to_target_waitstatus(status), arg2))
9268                  return -TARGET_EFAULT;
9269          }
9270          return ret;
9271  #endif
9272  #ifdef TARGET_NR_waitid
9273      case TARGET_NR_waitid:
9274          {
9275              struct rusage ru;
9276              siginfo_t info;
9277  
9278              ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9279                                          arg4, (arg5 ? &ru : NULL)));
9280              if (!is_error(ret)) {
9281                  if (arg3) {
9282                      p = lock_user(VERIFY_WRITE, arg3,
9283                                    sizeof(target_siginfo_t), 0);
9284                      if (!p) {
9285                          return -TARGET_EFAULT;
9286                      }
9287                      host_to_target_siginfo(p, &info);
9288                      unlock_user(p, arg3, sizeof(target_siginfo_t));
9289                  }
9290                  if (arg5 && host_to_target_rusage(arg5, &ru)) {
9291                      return -TARGET_EFAULT;
9292                  }
9293              }
9294          }
9295          return ret;
9296  #endif
9297  #ifdef TARGET_NR_creat /* not on alpha */
9298      case TARGET_NR_creat:
9299          if (!(p = lock_user_string(arg1)))
9300              return -TARGET_EFAULT;
9301          ret = get_errno(creat(p, arg2));
9302          fd_trans_unregister(ret);
9303          unlock_user(p, arg1, 0);
9304          return ret;
9305  #endif
9306  #ifdef TARGET_NR_link
9307      case TARGET_NR_link:
9308          {
9309              void * p2;
9310              p = lock_user_string(arg1);
9311              p2 = lock_user_string(arg2);
9312              if (!p || !p2)
9313                  ret = -TARGET_EFAULT;
9314              else
9315                  ret = get_errno(link(p, p2));
9316              unlock_user(p2, arg2, 0);
9317              unlock_user(p, arg1, 0);
9318          }
9319          return ret;
9320  #endif
9321  #if defined(TARGET_NR_linkat)
9322      case TARGET_NR_linkat:
9323          {
9324              void * p2 = NULL;
9325              if (!arg2 || !arg4)
9326                  return -TARGET_EFAULT;
9327              p  = lock_user_string(arg2);
9328              p2 = lock_user_string(arg4);
9329              if (!p || !p2)
9330                  ret = -TARGET_EFAULT;
9331              else
9332                  ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9333              unlock_user(p, arg2, 0);
9334              unlock_user(p2, arg4, 0);
9335          }
9336          return ret;
9337  #endif
9338  #ifdef TARGET_NR_unlink
9339      case TARGET_NR_unlink:
9340          if (!(p = lock_user_string(arg1)))
9341              return -TARGET_EFAULT;
9342          ret = get_errno(unlink(p));
9343          unlock_user(p, arg1, 0);
9344          return ret;
9345  #endif
9346  #if defined(TARGET_NR_unlinkat)
9347      case TARGET_NR_unlinkat:
9348          if (!(p = lock_user_string(arg2)))
9349              return -TARGET_EFAULT;
9350          ret = get_errno(unlinkat(arg1, p, arg3));
9351          unlock_user(p, arg2, 0);
9352          return ret;
9353  #endif
9354      case TARGET_NR_execveat:
9355          return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9356      case TARGET_NR_execve:
9357          return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9358      case TARGET_NR_chdir:
9359          if (!(p = lock_user_string(arg1)))
9360              return -TARGET_EFAULT;
9361          ret = get_errno(chdir(p));
9362          unlock_user(p, arg1, 0);
9363          return ret;
9364  #ifdef TARGET_NR_time
9365      case TARGET_NR_time:
9366          {
9367              time_t host_time;
9368              ret = get_errno(time(&host_time));
9369              if (!is_error(ret)
9370                  && arg1
9371                  && put_user_sal(host_time, arg1))
9372                  return -TARGET_EFAULT;
9373          }
9374          return ret;
9375  #endif
9376  #ifdef TARGET_NR_mknod
9377      case TARGET_NR_mknod:
9378          if (!(p = lock_user_string(arg1)))
9379              return -TARGET_EFAULT;
9380          ret = get_errno(mknod(p, arg2, arg3));
9381          unlock_user(p, arg1, 0);
9382          return ret;
9383  #endif
9384  #if defined(TARGET_NR_mknodat)
9385      case TARGET_NR_mknodat:
9386          if (!(p = lock_user_string(arg2)))
9387              return -TARGET_EFAULT;
9388          ret = get_errno(mknodat(arg1, p, arg3, arg4));
9389          unlock_user(p, arg2, 0);
9390          return ret;
9391  #endif
9392  #ifdef TARGET_NR_chmod
9393      case TARGET_NR_chmod:
9394          if (!(p = lock_user_string(arg1)))
9395              return -TARGET_EFAULT;
9396          ret = get_errno(chmod(p, arg2));
9397          unlock_user(p, arg1, 0);
9398          return ret;
9399  #endif
9400  #ifdef TARGET_NR_lseek
9401      case TARGET_NR_lseek:
9402          return get_errno(lseek(arg1, arg2, arg3));
9403  #endif
9404  #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9405      /* Alpha specific */
9406      case TARGET_NR_getxpid:
9407          cpu_env->ir[IR_A4] = getppid();
9408          return get_errno(getpid());
9409  #endif
9410  #ifdef TARGET_NR_getpid
9411      case TARGET_NR_getpid:
9412          return get_errno(getpid());
9413  #endif
9414      case TARGET_NR_mount:
9415          {
9416              /* need to look at the data field */
9417              void *p2, *p3;
9418  
9419              if (arg1) {
9420                  p = lock_user_string(arg1);
9421                  if (!p) {
9422                      return -TARGET_EFAULT;
9423                  }
9424              } else {
9425                  p = NULL;
9426              }
9427  
9428              p2 = lock_user_string(arg2);
9429              if (!p2) {
9430                  if (arg1) {
9431                      unlock_user(p, arg1, 0);
9432                  }
9433                  return -TARGET_EFAULT;
9434              }
9435  
9436              if (arg3) {
9437                  p3 = lock_user_string(arg3);
9438                  if (!p3) {
9439                      if (arg1) {
9440                          unlock_user(p, arg1, 0);
9441                      }
9442                      unlock_user(p2, arg2, 0);
9443                      return -TARGET_EFAULT;
9444                  }
9445              } else {
9446                  p3 = NULL;
9447              }
9448  
9449              /* FIXME - arg5 should be locked, but it isn't clear how to
9450               * do that since it's not guaranteed to be a NULL-terminated
9451               * string.
9452               */
9453              if (!arg5) {
9454                  ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9455              } else {
9456                  ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9457              }
9458              ret = get_errno(ret);
9459  
9460              if (arg1) {
9461                  unlock_user(p, arg1, 0);
9462              }
9463              unlock_user(p2, arg2, 0);
9464              if (arg3) {
9465                  unlock_user(p3, arg3, 0);
9466              }
9467          }
9468          return ret;
9469  #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9470  #if defined(TARGET_NR_umount)
9471      case TARGET_NR_umount:
9472  #endif
9473  #if defined(TARGET_NR_oldumount)
9474      case TARGET_NR_oldumount:
9475  #endif
9476          if (!(p = lock_user_string(arg1)))
9477              return -TARGET_EFAULT;
9478          ret = get_errno(umount(p));
9479          unlock_user(p, arg1, 0);
9480          return ret;
9481  #endif
9482  #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9483      case TARGET_NR_move_mount:
9484          {
9485              void *p2, *p4;
9486  
9487              if (!arg2 || !arg4) {
9488                  return -TARGET_EFAULT;
9489              }
9490  
9491              p2 = lock_user_string(arg2);
9492              if (!p2) {
9493                  return -TARGET_EFAULT;
9494              }
9495  
9496              p4 = lock_user_string(arg4);
9497              if (!p4) {
9498                  unlock_user(p2, arg2, 0);
9499                  return -TARGET_EFAULT;
9500              }
9501              ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9502  
9503              unlock_user(p2, arg2, 0);
9504              unlock_user(p4, arg4, 0);
9505  
9506              return ret;
9507          }
9508  #endif
9509  #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9510      case TARGET_NR_open_tree:
9511          {
9512              void *p2;
9513              int host_flags;
9514  
9515              if (!arg2) {
9516                  return -TARGET_EFAULT;
9517              }
9518  
9519              p2 = lock_user_string(arg2);
9520              if (!p2) {
9521                  return -TARGET_EFAULT;
9522              }
9523  
9524              host_flags = arg3 & ~TARGET_O_CLOEXEC;
9525              if (arg3 & TARGET_O_CLOEXEC) {
9526                  host_flags |= O_CLOEXEC;
9527              }
9528  
9529              ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9530  
9531              unlock_user(p2, arg2, 0);
9532  
9533              return ret;
9534          }
9535  #endif
9536  #ifdef TARGET_NR_stime /* not on alpha */
9537      case TARGET_NR_stime:
9538          {
9539              struct timespec ts;
9540              ts.tv_nsec = 0;
9541              if (get_user_sal(ts.tv_sec, arg1)) {
9542                  return -TARGET_EFAULT;
9543              }
9544              return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9545          }
9546  #endif
9547  #ifdef TARGET_NR_alarm /* not on alpha */
9548      case TARGET_NR_alarm:
9549          return alarm(arg1);
9550  #endif
9551  #ifdef TARGET_NR_pause /* not on alpha */
9552      case TARGET_NR_pause:
9553          if (!block_signals()) {
9554              sigsuspend(&get_task_state(cpu)->signal_mask);
9555          }
9556          return -TARGET_EINTR;
9557  #endif
9558  #ifdef TARGET_NR_utime
9559      case TARGET_NR_utime:
9560          {
9561              struct utimbuf tbuf, *host_tbuf;
9562              struct target_utimbuf *target_tbuf;
9563              if (arg2) {
9564                  if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9565                      return -TARGET_EFAULT;
9566                  tbuf.actime = tswapal(target_tbuf->actime);
9567                  tbuf.modtime = tswapal(target_tbuf->modtime);
9568                  unlock_user_struct(target_tbuf, arg2, 0);
9569                  host_tbuf = &tbuf;
9570              } else {
9571                  host_tbuf = NULL;
9572              }
9573              if (!(p = lock_user_string(arg1)))
9574                  return -TARGET_EFAULT;
9575              ret = get_errno(utime(p, host_tbuf));
9576              unlock_user(p, arg1, 0);
9577          }
9578          return ret;
9579  #endif
9580  #ifdef TARGET_NR_utimes
9581      case TARGET_NR_utimes:
9582          {
9583              struct timeval *tvp, tv[2];
9584              if (arg2) {
9585                  if (copy_from_user_timeval(&tv[0], arg2)
9586                      || copy_from_user_timeval(&tv[1],
9587                                                arg2 + sizeof(struct target_timeval)))
9588                      return -TARGET_EFAULT;
9589                  tvp = tv;
9590              } else {
9591                  tvp = NULL;
9592              }
9593              if (!(p = lock_user_string(arg1)))
9594                  return -TARGET_EFAULT;
9595              ret = get_errno(utimes(p, tvp));
9596              unlock_user(p, arg1, 0);
9597          }
9598          return ret;
9599  #endif
9600  #if defined(TARGET_NR_futimesat)
9601      case TARGET_NR_futimesat:
9602          {
9603              struct timeval *tvp, tv[2];
9604              if (arg3) {
9605                  if (copy_from_user_timeval(&tv[0], arg3)
9606                      || copy_from_user_timeval(&tv[1],
9607                                                arg3 + sizeof(struct target_timeval)))
9608                      return -TARGET_EFAULT;
9609                  tvp = tv;
9610              } else {
9611                  tvp = NULL;
9612              }
9613              if (!(p = lock_user_string(arg2))) {
9614                  return -TARGET_EFAULT;
9615              }
9616              ret = get_errno(futimesat(arg1, path(p), tvp));
9617              unlock_user(p, arg2, 0);
9618          }
9619          return ret;
9620  #endif
9621  #ifdef TARGET_NR_access
9622      case TARGET_NR_access:
9623          if (!(p = lock_user_string(arg1))) {
9624              return -TARGET_EFAULT;
9625          }
9626          ret = get_errno(access(path(p), arg2));
9627          unlock_user(p, arg1, 0);
9628          return ret;
9629  #endif
9630  #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9631      case TARGET_NR_faccessat:
9632          if (!(p = lock_user_string(arg2))) {
9633              return -TARGET_EFAULT;
9634          }
9635          ret = get_errno(faccessat(arg1, p, arg3, 0));
9636          unlock_user(p, arg2, 0);
9637          return ret;
9638  #endif
9639  #if defined(TARGET_NR_faccessat2)
9640      case TARGET_NR_faccessat2:
9641          if (!(p = lock_user_string(arg2))) {
9642              return -TARGET_EFAULT;
9643          }
9644          ret = get_errno(faccessat(arg1, p, arg3, arg4));
9645          unlock_user(p, arg2, 0);
9646          return ret;
9647  #endif
9648  #ifdef TARGET_NR_nice /* not on alpha */
9649      case TARGET_NR_nice:
9650          return get_errno(nice(arg1));
9651  #endif
9652      case TARGET_NR_sync:
9653          sync();
9654          return 0;
9655  #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9656      case TARGET_NR_syncfs:
9657          return get_errno(syncfs(arg1));
9658  #endif
9659      case TARGET_NR_kill:
9660          return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9661  #ifdef TARGET_NR_rename
9662      case TARGET_NR_rename:
9663          {
9664              void *p2;
9665              p = lock_user_string(arg1);
9666              p2 = lock_user_string(arg2);
9667              if (!p || !p2)
9668                  ret = -TARGET_EFAULT;
9669              else
9670                  ret = get_errno(rename(p, p2));
9671              unlock_user(p2, arg2, 0);
9672              unlock_user(p, arg1, 0);
9673          }
9674          return ret;
9675  #endif
9676  #if defined(TARGET_NR_renameat)
9677      case TARGET_NR_renameat:
9678          {
9679              void *p2;
9680              p  = lock_user_string(arg2);
9681              p2 = lock_user_string(arg4);
9682              if (!p || !p2)
9683                  ret = -TARGET_EFAULT;
9684              else
9685                  ret = get_errno(renameat(arg1, p, arg3, p2));
9686              unlock_user(p2, arg4, 0);
9687              unlock_user(p, arg2, 0);
9688          }
9689          return ret;
9690  #endif
9691  #if defined(TARGET_NR_renameat2)
9692      case TARGET_NR_renameat2:
9693          {
9694              void *p2;
9695              p  = lock_user_string(arg2);
9696              p2 = lock_user_string(arg4);
9697              if (!p || !p2) {
9698                  ret = -TARGET_EFAULT;
9699              } else {
9700                  ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9701              }
9702              unlock_user(p2, arg4, 0);
9703              unlock_user(p, arg2, 0);
9704          }
9705          return ret;
9706  #endif
9707  #ifdef TARGET_NR_mkdir
9708      case TARGET_NR_mkdir:
9709          if (!(p = lock_user_string(arg1)))
9710              return -TARGET_EFAULT;
9711          ret = get_errno(mkdir(p, arg2));
9712          unlock_user(p, arg1, 0);
9713          return ret;
9714  #endif
9715  #if defined(TARGET_NR_mkdirat)
9716      case TARGET_NR_mkdirat:
9717          if (!(p = lock_user_string(arg2)))
9718              return -TARGET_EFAULT;
9719          ret = get_errno(mkdirat(arg1, p, arg3));
9720          unlock_user(p, arg2, 0);
9721          return ret;
9722  #endif
9723  #ifdef TARGET_NR_rmdir
9724      case TARGET_NR_rmdir:
9725          if (!(p = lock_user_string(arg1)))
9726              return -TARGET_EFAULT;
9727          ret = get_errno(rmdir(p));
9728          unlock_user(p, arg1, 0);
9729          return ret;
9730  #endif
9731      case TARGET_NR_dup:
9732          ret = get_errno(dup(arg1));
9733          if (ret >= 0) {
9734              fd_trans_dup(arg1, ret);
9735          }
9736          return ret;
9737  #ifdef TARGET_NR_pipe
9738      case TARGET_NR_pipe:
9739          return do_pipe(cpu_env, arg1, 0, 0);
9740  #endif
9741  #ifdef TARGET_NR_pipe2
9742      case TARGET_NR_pipe2:
9743          return do_pipe(cpu_env, arg1,
9744                         target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9745  #endif
9746      case TARGET_NR_times:
9747          {
9748              struct target_tms *tmsp;
9749              struct tms tms;
9750              ret = get_errno(times(&tms));
9751              if (arg1) {
9752                  tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9753                  if (!tmsp)
9754                      return -TARGET_EFAULT;
9755                  tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9756                  tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9757                  tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9758                  tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9759              }
9760              if (!is_error(ret))
9761                  ret = host_to_target_clock_t(ret);
9762          }
9763          return ret;
9764      case TARGET_NR_acct:
9765          if (arg1 == 0) {
9766              ret = get_errno(acct(NULL));
9767          } else {
9768              if (!(p = lock_user_string(arg1))) {
9769                  return -TARGET_EFAULT;
9770              }
9771              ret = get_errno(acct(path(p)));
9772              unlock_user(p, arg1, 0);
9773          }
9774          return ret;
9775  #ifdef TARGET_NR_umount2
9776      case TARGET_NR_umount2:
9777          if (!(p = lock_user_string(arg1)))
9778              return -TARGET_EFAULT;
9779          ret = get_errno(umount2(p, arg2));
9780          unlock_user(p, arg1, 0);
9781          return ret;
9782  #endif
9783      case TARGET_NR_ioctl:
9784          return do_ioctl(arg1, arg2, arg3);
9785  #ifdef TARGET_NR_fcntl
9786      case TARGET_NR_fcntl:
9787          return do_fcntl(arg1, arg2, arg3);
9788  #endif
9789      case TARGET_NR_setpgid:
9790          return get_errno(setpgid(arg1, arg2));
9791      case TARGET_NR_umask:
9792          return get_errno(umask(arg1));
9793      case TARGET_NR_chroot:
9794          if (!(p = lock_user_string(arg1)))
9795              return -TARGET_EFAULT;
9796          ret = get_errno(chroot(p));
9797          unlock_user(p, arg1, 0);
9798          return ret;
9799  #ifdef TARGET_NR_dup2
9800      case TARGET_NR_dup2:
9801          ret = get_errno(dup2(arg1, arg2));
9802          if (ret >= 0) {
9803              fd_trans_dup(arg1, arg2);
9804          }
9805          return ret;
9806  #endif
9807  #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9808      case TARGET_NR_dup3:
9809      {
9810          int host_flags;
9811  
9812          if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9813              return -EINVAL;
9814          }
9815          host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9816          ret = get_errno(dup3(arg1, arg2, host_flags));
9817          if (ret >= 0) {
9818              fd_trans_dup(arg1, arg2);
9819          }
9820          return ret;
9821      }
9822  #endif
9823  #ifdef TARGET_NR_getppid /* not on alpha */
9824      case TARGET_NR_getppid:
9825          return get_errno(getppid());
9826  #endif
9827  #ifdef TARGET_NR_getpgrp
9828      case TARGET_NR_getpgrp:
9829          return get_errno(getpgrp());
9830  #endif
9831      case TARGET_NR_setsid:
9832          return get_errno(setsid());
9833  #ifdef TARGET_NR_sigaction
9834      case TARGET_NR_sigaction:
9835          {
9836  #if defined(TARGET_MIPS)
9837  	    struct target_sigaction act, oact, *pact, *old_act;
9838  
9839  	    if (arg2) {
9840                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9841                      return -TARGET_EFAULT;
9842  		act._sa_handler = old_act->_sa_handler;
9843  		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9844  		act.sa_flags = old_act->sa_flags;
9845  		unlock_user_struct(old_act, arg2, 0);
9846  		pact = &act;
9847  	    } else {
9848  		pact = NULL;
9849  	    }
9850  
9851          ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9852  
9853  	    if (!is_error(ret) && arg3) {
9854                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9855                      return -TARGET_EFAULT;
9856  		old_act->_sa_handler = oact._sa_handler;
9857  		old_act->sa_flags = oact.sa_flags;
9858  		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9859  		old_act->sa_mask.sig[1] = 0;
9860  		old_act->sa_mask.sig[2] = 0;
9861  		old_act->sa_mask.sig[3] = 0;
9862  		unlock_user_struct(old_act, arg3, 1);
9863  	    }
9864  #else
9865              struct target_old_sigaction *old_act;
9866              struct target_sigaction act, oact, *pact;
9867              if (arg2) {
9868                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9869                      return -TARGET_EFAULT;
9870                  act._sa_handler = old_act->_sa_handler;
9871                  target_siginitset(&act.sa_mask, old_act->sa_mask);
9872                  act.sa_flags = old_act->sa_flags;
9873  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9874                  act.sa_restorer = old_act->sa_restorer;
9875  #endif
9876                  unlock_user_struct(old_act, arg2, 0);
9877                  pact = &act;
9878              } else {
9879                  pact = NULL;
9880              }
9881              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9882              if (!is_error(ret) && arg3) {
9883                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9884                      return -TARGET_EFAULT;
9885                  old_act->_sa_handler = oact._sa_handler;
9886                  old_act->sa_mask = oact.sa_mask.sig[0];
9887                  old_act->sa_flags = oact.sa_flags;
9888  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9889                  old_act->sa_restorer = oact.sa_restorer;
9890  #endif
9891                  unlock_user_struct(old_act, arg3, 1);
9892              }
9893  #endif
9894          }
9895          return ret;
9896  #endif
9897      case TARGET_NR_rt_sigaction:
9898          {
9899              /*
9900               * For Alpha and SPARC this is a 5 argument syscall, with
9901               * a 'restorer' parameter which must be copied into the
9902               * sa_restorer field of the sigaction struct.
9903               * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9904               * and arg5 is the sigsetsize.
9905               */
9906  #if defined(TARGET_ALPHA)
9907              target_ulong sigsetsize = arg4;
9908              target_ulong restorer = arg5;
9909  #elif defined(TARGET_SPARC)
9910              target_ulong restorer = arg4;
9911              target_ulong sigsetsize = arg5;
9912  #else
9913              target_ulong sigsetsize = arg4;
9914              target_ulong restorer = 0;
9915  #endif
9916              struct target_sigaction *act = NULL;
9917              struct target_sigaction *oact = NULL;
9918  
9919              if (sigsetsize != sizeof(target_sigset_t)) {
9920                  return -TARGET_EINVAL;
9921              }
9922              if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9923                  return -TARGET_EFAULT;
9924              }
9925              if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9926                  ret = -TARGET_EFAULT;
9927              } else {
9928                  ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9929                  if (oact) {
9930                      unlock_user_struct(oact, arg3, 1);
9931                  }
9932              }
9933              if (act) {
9934                  unlock_user_struct(act, arg2, 0);
9935              }
9936          }
9937          return ret;
9938  #ifdef TARGET_NR_sgetmask /* not on alpha */
9939      case TARGET_NR_sgetmask:
9940          {
9941              sigset_t cur_set;
9942              abi_ulong target_set;
9943              ret = do_sigprocmask(0, NULL, &cur_set);
9944              if (!ret) {
9945                  host_to_target_old_sigset(&target_set, &cur_set);
9946                  ret = target_set;
9947              }
9948          }
9949          return ret;
9950  #endif
9951  #ifdef TARGET_NR_ssetmask /* not on alpha */
9952      case TARGET_NR_ssetmask:
9953          {
9954              sigset_t set, oset;
9955              abi_ulong target_set = arg1;
9956              target_to_host_old_sigset(&set, &target_set);
9957              ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9958              if (!ret) {
9959                  host_to_target_old_sigset(&target_set, &oset);
9960                  ret = target_set;
9961              }
9962          }
9963          return ret;
9964  #endif
9965  #ifdef TARGET_NR_sigprocmask
9966      case TARGET_NR_sigprocmask:
9967          {
9968  #if defined(TARGET_ALPHA)
9969              sigset_t set, oldset;
9970              abi_ulong mask;
9971              int how;
9972  
9973              switch (arg1) {
9974              case TARGET_SIG_BLOCK:
9975                  how = SIG_BLOCK;
9976                  break;
9977              case TARGET_SIG_UNBLOCK:
9978                  how = SIG_UNBLOCK;
9979                  break;
9980              case TARGET_SIG_SETMASK:
9981                  how = SIG_SETMASK;
9982                  break;
9983              default:
9984                  return -TARGET_EINVAL;
9985              }
9986              mask = arg2;
9987              target_to_host_old_sigset(&set, &mask);
9988  
9989              ret = do_sigprocmask(how, &set, &oldset);
9990              if (!is_error(ret)) {
9991                  host_to_target_old_sigset(&mask, &oldset);
9992                  ret = mask;
9993                  cpu_env->ir[IR_V0] = 0; /* force no error */
9994              }
9995  #else
9996              sigset_t set, oldset, *set_ptr;
9997              int how;
9998  
9999              if (arg2) {
10000                  p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10001                  if (!p) {
10002                      return -TARGET_EFAULT;
10003                  }
10004                  target_to_host_old_sigset(&set, p);
10005                  unlock_user(p, arg2, 0);
10006                  set_ptr = &set;
10007                  switch (arg1) {
10008                  case TARGET_SIG_BLOCK:
10009                      how = SIG_BLOCK;
10010                      break;
10011                  case TARGET_SIG_UNBLOCK:
10012                      how = SIG_UNBLOCK;
10013                      break;
10014                  case TARGET_SIG_SETMASK:
10015                      how = SIG_SETMASK;
10016                      break;
10017                  default:
10018                      return -TARGET_EINVAL;
10019                  }
10020              } else {
10021                  how = 0;
10022                  set_ptr = NULL;
10023              }
10024              ret = do_sigprocmask(how, set_ptr, &oldset);
10025              if (!is_error(ret) && arg3) {
10026                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10027                      return -TARGET_EFAULT;
10028                  host_to_target_old_sigset(p, &oldset);
10029                  unlock_user(p, arg3, sizeof(target_sigset_t));
10030              }
10031  #endif
10032          }
10033          return ret;
10034  #endif
10035      case TARGET_NR_rt_sigprocmask:
10036          {
10037              int how = arg1;
10038              sigset_t set, oldset, *set_ptr;
10039  
10040              if (arg4 != sizeof(target_sigset_t)) {
10041                  return -TARGET_EINVAL;
10042              }
10043  
10044              if (arg2) {
10045                  p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10046                  if (!p) {
10047                      return -TARGET_EFAULT;
10048                  }
10049                  target_to_host_sigset(&set, p);
10050                  unlock_user(p, arg2, 0);
10051                  set_ptr = &set;
10052                  switch(how) {
10053                  case TARGET_SIG_BLOCK:
10054                      how = SIG_BLOCK;
10055                      break;
10056                  case TARGET_SIG_UNBLOCK:
10057                      how = SIG_UNBLOCK;
10058                      break;
10059                  case TARGET_SIG_SETMASK:
10060                      how = SIG_SETMASK;
10061                      break;
10062                  default:
10063                      return -TARGET_EINVAL;
10064                  }
10065              } else {
10066                  how = 0;
10067                  set_ptr = NULL;
10068              }
10069              ret = do_sigprocmask(how, set_ptr, &oldset);
10070              if (!is_error(ret) && arg3) {
10071                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10072                      return -TARGET_EFAULT;
10073                  host_to_target_sigset(p, &oldset);
10074                  unlock_user(p, arg3, sizeof(target_sigset_t));
10075              }
10076          }
10077          return ret;
10078  #ifdef TARGET_NR_sigpending
10079      case TARGET_NR_sigpending:
10080          {
10081              sigset_t set;
10082              ret = get_errno(sigpending(&set));
10083              if (!is_error(ret)) {
10084                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10085                      return -TARGET_EFAULT;
10086                  host_to_target_old_sigset(p, &set);
10087                  unlock_user(p, arg1, sizeof(target_sigset_t));
10088              }
10089          }
10090          return ret;
10091  #endif
10092      case TARGET_NR_rt_sigpending:
10093          {
10094              sigset_t set;
10095  
10096              /* Yes, this check is >, not != like most. We follow the kernel's
10097               * logic and it does it like this because it implements
10098               * NR_sigpending through the same code path, and in that case
10099               * the old_sigset_t is smaller in size.
10100               */
10101              if (arg2 > sizeof(target_sigset_t)) {
10102                  return -TARGET_EINVAL;
10103              }
10104  
10105              ret = get_errno(sigpending(&set));
10106              if (!is_error(ret)) {
10107                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10108                      return -TARGET_EFAULT;
10109                  host_to_target_sigset(p, &set);
10110                  unlock_user(p, arg1, sizeof(target_sigset_t));
10111              }
10112          }
10113          return ret;
10114  #ifdef TARGET_NR_sigsuspend
10115      case TARGET_NR_sigsuspend:
10116          {
10117              sigset_t *set;
10118  
10119  #if defined(TARGET_ALPHA)
10120              TaskState *ts = get_task_state(cpu);
10121              /* target_to_host_old_sigset will bswap back */
10122              abi_ulong mask = tswapal(arg1);
10123              set = &ts->sigsuspend_mask;
10124              target_to_host_old_sigset(set, &mask);
10125  #else
10126              ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10127              if (ret != 0) {
10128                  return ret;
10129              }
10130  #endif
10131              ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10132              finish_sigsuspend_mask(ret);
10133          }
10134          return ret;
10135  #endif
10136      case TARGET_NR_rt_sigsuspend:
10137          {
10138              sigset_t *set;
10139  
10140              ret = process_sigsuspend_mask(&set, arg1, arg2);
10141              if (ret != 0) {
10142                  return ret;
10143              }
10144              ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10145              finish_sigsuspend_mask(ret);
10146          }
10147          return ret;
10148  #ifdef TARGET_NR_rt_sigtimedwait
10149      case TARGET_NR_rt_sigtimedwait:
10150          {
10151              sigset_t set;
10152              struct timespec uts, *puts;
10153              siginfo_t uinfo;
10154  
10155              if (arg4 != sizeof(target_sigset_t)) {
10156                  return -TARGET_EINVAL;
10157              }
10158  
10159              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10160                  return -TARGET_EFAULT;
10161              target_to_host_sigset(&set, p);
10162              unlock_user(p, arg1, 0);
10163              if (arg3) {
10164                  puts = &uts;
10165                  if (target_to_host_timespec(puts, arg3)) {
10166                      return -TARGET_EFAULT;
10167                  }
10168              } else {
10169                  puts = NULL;
10170              }
10171              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10172                                                   SIGSET_T_SIZE));
10173              if (!is_error(ret)) {
10174                  if (arg2) {
10175                      p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10176                                    0);
10177                      if (!p) {
10178                          return -TARGET_EFAULT;
10179                      }
10180                      host_to_target_siginfo(p, &uinfo);
10181                      unlock_user(p, arg2, sizeof(target_siginfo_t));
10182                  }
10183                  ret = host_to_target_signal(ret);
10184              }
10185          }
10186          return ret;
10187  #endif
10188  #ifdef TARGET_NR_rt_sigtimedwait_time64
10189      case TARGET_NR_rt_sigtimedwait_time64:
10190          {
10191              sigset_t set;
10192              struct timespec uts, *puts;
10193              siginfo_t uinfo;
10194  
10195              if (arg4 != sizeof(target_sigset_t)) {
10196                  return -TARGET_EINVAL;
10197              }
10198  
10199              p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10200              if (!p) {
10201                  return -TARGET_EFAULT;
10202              }
10203              target_to_host_sigset(&set, p);
10204              unlock_user(p, arg1, 0);
10205              if (arg3) {
10206                  puts = &uts;
10207                  if (target_to_host_timespec64(puts, arg3)) {
10208                      return -TARGET_EFAULT;
10209                  }
10210              } else {
10211                  puts = NULL;
10212              }
10213              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10214                                                   SIGSET_T_SIZE));
10215              if (!is_error(ret)) {
10216                  if (arg2) {
10217                      p = lock_user(VERIFY_WRITE, arg2,
10218                                    sizeof(target_siginfo_t), 0);
10219                      if (!p) {
10220                          return -TARGET_EFAULT;
10221                      }
10222                      host_to_target_siginfo(p, &uinfo);
10223                      unlock_user(p, arg2, sizeof(target_siginfo_t));
10224                  }
10225                  ret = host_to_target_signal(ret);
10226              }
10227          }
10228          return ret;
10229  #endif
10230      case TARGET_NR_rt_sigqueueinfo:
10231          {
10232              siginfo_t uinfo;
10233  
10234              p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10235              if (!p) {
10236                  return -TARGET_EFAULT;
10237              }
10238              target_to_host_siginfo(&uinfo, p);
10239              unlock_user(p, arg3, 0);
10240              ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10241          }
10242          return ret;
10243      case TARGET_NR_rt_tgsigqueueinfo:
10244          {
10245              siginfo_t uinfo;
10246  
10247              p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10248              if (!p) {
10249                  return -TARGET_EFAULT;
10250              }
10251              target_to_host_siginfo(&uinfo, p);
10252              unlock_user(p, arg4, 0);
10253              ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10254          }
10255          return ret;
10256  #ifdef TARGET_NR_sigreturn
10257      case TARGET_NR_sigreturn:
10258          if (block_signals()) {
10259              return -QEMU_ERESTARTSYS;
10260          }
10261          return do_sigreturn(cpu_env);
10262  #endif
10263      case TARGET_NR_rt_sigreturn:
10264          if (block_signals()) {
10265              return -QEMU_ERESTARTSYS;
10266          }
10267          return do_rt_sigreturn(cpu_env);
10268      case TARGET_NR_sethostname:
10269          if (!(p = lock_user_string(arg1)))
10270              return -TARGET_EFAULT;
10271          ret = get_errno(sethostname(p, arg2));
10272          unlock_user(p, arg1, 0);
10273          return ret;
10274  #ifdef TARGET_NR_setrlimit
10275      case TARGET_NR_setrlimit:
10276          {
10277              int resource = target_to_host_resource(arg1);
10278              struct target_rlimit *target_rlim;
10279              struct rlimit rlim;
10280              if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10281                  return -TARGET_EFAULT;
10282              rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10283              rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10284              unlock_user_struct(target_rlim, arg2, 0);
10285              /*
10286               * If we just passed through resource limit settings for memory then
10287               * they would also apply to QEMU's own allocations, and QEMU will
10288               * crash or hang or die if its allocations fail. Ideally we would
10289               * track the guest allocations in QEMU and apply the limits ourselves.
10290               * For now, just tell the guest the call succeeded but don't actually
10291               * limit anything.
10292               */
10293              if (resource != RLIMIT_AS &&
10294                  resource != RLIMIT_DATA &&
10295                  resource != RLIMIT_STACK) {
10296                  return get_errno(setrlimit(resource, &rlim));
10297              } else {
10298                  return 0;
10299              }
10300          }
10301  #endif
10302  #ifdef TARGET_NR_getrlimit
10303      case TARGET_NR_getrlimit:
10304          {
10305              int resource = target_to_host_resource(arg1);
10306              struct target_rlimit *target_rlim;
10307              struct rlimit rlim;
10308  
10309              ret = get_errno(getrlimit(resource, &rlim));
10310              if (!is_error(ret)) {
10311                  if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10312                      return -TARGET_EFAULT;
10313                  target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10314                  target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10315                  unlock_user_struct(target_rlim, arg2, 1);
10316              }
10317          }
10318          return ret;
10319  #endif
10320      case TARGET_NR_getrusage:
10321          {
10322              struct rusage rusage;
10323              ret = get_errno(getrusage(arg1, &rusage));
10324              if (!is_error(ret)) {
10325                  ret = host_to_target_rusage(arg2, &rusage);
10326              }
10327          }
10328          return ret;
10329  #if defined(TARGET_NR_gettimeofday)
10330      case TARGET_NR_gettimeofday:
10331          {
10332              struct timeval tv;
10333              struct timezone tz;
10334  
10335              ret = get_errno(gettimeofday(&tv, &tz));
10336              if (!is_error(ret)) {
10337                  if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10338                      return -TARGET_EFAULT;
10339                  }
10340                  if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10341                      return -TARGET_EFAULT;
10342                  }
10343              }
10344          }
10345          return ret;
10346  #endif
10347  #if defined(TARGET_NR_settimeofday)
10348      case TARGET_NR_settimeofday:
10349          {
10350              struct timeval tv, *ptv = NULL;
10351              struct timezone tz, *ptz = NULL;
10352  
10353              if (arg1) {
10354                  if (copy_from_user_timeval(&tv, arg1)) {
10355                      return -TARGET_EFAULT;
10356                  }
10357                  ptv = &tv;
10358              }
10359  
10360              if (arg2) {
10361                  if (copy_from_user_timezone(&tz, arg2)) {
10362                      return -TARGET_EFAULT;
10363                  }
10364                  ptz = &tz;
10365              }
10366  
10367              return get_errno(settimeofday(ptv, ptz));
10368          }
10369  #endif
10370  #if defined(TARGET_NR_select)
10371      case TARGET_NR_select:
10372  #if defined(TARGET_WANT_NI_OLD_SELECT)
10373          /* some architectures used to have old_select here
10374           * but now ENOSYS it.
10375           */
10376          ret = -TARGET_ENOSYS;
10377  #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10378          ret = do_old_select(arg1);
10379  #else
10380          ret = do_select(arg1, arg2, arg3, arg4, arg5);
10381  #endif
10382          return ret;
10383  #endif
10384  #ifdef TARGET_NR_pselect6
10385      case TARGET_NR_pselect6:
10386          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10387  #endif
10388  #ifdef TARGET_NR_pselect6_time64
10389      case TARGET_NR_pselect6_time64:
10390          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10391  #endif
10392  #ifdef TARGET_NR_symlink
10393      case TARGET_NR_symlink:
10394          {
10395              void *p2;
10396              p = lock_user_string(arg1);
10397              p2 = lock_user_string(arg2);
10398              if (!p || !p2)
10399                  ret = -TARGET_EFAULT;
10400              else
10401                  ret = get_errno(symlink(p, p2));
10402              unlock_user(p2, arg2, 0);
10403              unlock_user(p, arg1, 0);
10404          }
10405          return ret;
10406  #endif
10407  #if defined(TARGET_NR_symlinkat)
10408      case TARGET_NR_symlinkat:
10409          {
10410              void *p2;
10411              p  = lock_user_string(arg1);
10412              p2 = lock_user_string(arg3);
10413              if (!p || !p2)
10414                  ret = -TARGET_EFAULT;
10415              else
10416                  ret = get_errno(symlinkat(p, arg2, p2));
10417              unlock_user(p2, arg3, 0);
10418              unlock_user(p, arg1, 0);
10419          }
10420          return ret;
10421  #endif
10422  #ifdef TARGET_NR_readlink
10423      case TARGET_NR_readlink:
10424          {
10425              void *p2;
10426              p = lock_user_string(arg1);
10427              p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10428              ret = get_errno(do_guest_readlink(p, p2, arg3));
10429              unlock_user(p2, arg2, ret);
10430              unlock_user(p, arg1, 0);
10431          }
10432          return ret;
10433  #endif
10434  #if defined(TARGET_NR_readlinkat)
10435      case TARGET_NR_readlinkat:
10436          {
10437              void *p2;
10438              p  = lock_user_string(arg2);
10439              p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10440              if (!p || !p2) {
10441                  ret = -TARGET_EFAULT;
10442              } else if (!arg4) {
10443                  /* Short circuit this for the magic exe check. */
10444                  ret = -TARGET_EINVAL;
10445              } else if (is_proc_myself((const char *)p, "exe")) {
10446                  /*
10447                   * Don't worry about sign mismatch as earlier mapping
10448                   * logic would have thrown a bad address error.
10449                   */
10450                  ret = MIN(strlen(exec_path), arg4);
10451                  /* We cannot NUL terminate the string. */
10452                  memcpy(p2, exec_path, ret);
10453              } else {
10454                  ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10455              }
10456              unlock_user(p2, arg3, ret);
10457              unlock_user(p, arg2, 0);
10458          }
10459          return ret;
10460  #endif
10461  #ifdef TARGET_NR_swapon
10462      case TARGET_NR_swapon:
10463          if (!(p = lock_user_string(arg1)))
10464              return -TARGET_EFAULT;
10465          ret = get_errno(swapon(p, arg2));
10466          unlock_user(p, arg1, 0);
10467          return ret;
10468  #endif
10469      case TARGET_NR_reboot:
10470          if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10471             /* arg4 must be ignored in all other cases */
10472             p = lock_user_string(arg4);
10473             if (!p) {
10474                 return -TARGET_EFAULT;
10475             }
10476             ret = get_errno(reboot(arg1, arg2, arg3, p));
10477             unlock_user(p, arg4, 0);
10478          } else {
10479             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10480          }
10481          return ret;
10482  #ifdef TARGET_NR_mmap
10483      case TARGET_NR_mmap:
10484  #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10485      (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10486      defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10487      || defined(TARGET_S390X)
10488          {
10489              abi_ulong *v;
10490              abi_ulong v1, v2, v3, v4, v5, v6;
10491              if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10492                  return -TARGET_EFAULT;
10493              v1 = tswapal(v[0]);
10494              v2 = tswapal(v[1]);
10495              v3 = tswapal(v[2]);
10496              v4 = tswapal(v[3]);
10497              v5 = tswapal(v[4]);
10498              v6 = tswapal(v[5]);
10499              unlock_user(v, arg1, 0);
10500              return do_mmap(v1, v2, v3, v4, v5, v6);
10501          }
10502  #else
10503          /* mmap pointers are always untagged */
10504          return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10505  #endif
10506  #endif
10507  #ifdef TARGET_NR_mmap2
10508      case TARGET_NR_mmap2:
10509  #ifndef MMAP_SHIFT
10510  #define MMAP_SHIFT 12
10511  #endif
10512          return do_mmap(arg1, arg2, arg3, arg4, arg5,
10513                         (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10514  #endif
10515      case TARGET_NR_munmap:
10516          arg1 = cpu_untagged_addr(cpu, arg1);
10517          return get_errno(target_munmap(arg1, arg2));
10518      case TARGET_NR_mprotect:
10519          arg1 = cpu_untagged_addr(cpu, arg1);
10520          {
10521              TaskState *ts = get_task_state(cpu);
10522              /* Special hack to detect libc making the stack executable.  */
10523              if ((arg3 & PROT_GROWSDOWN)
10524                  && arg1 >= ts->info->stack_limit
10525                  && arg1 <= ts->info->start_stack) {
10526                  arg3 &= ~PROT_GROWSDOWN;
10527                  arg2 = arg2 + arg1 - ts->info->stack_limit;
10528                  arg1 = ts->info->stack_limit;
10529              }
10530          }
10531          return get_errno(target_mprotect(arg1, arg2, arg3));
10532  #ifdef TARGET_NR_mremap
10533      case TARGET_NR_mremap:
10534          arg1 = cpu_untagged_addr(cpu, arg1);
10535          /* mremap new_addr (arg5) is always untagged */
10536          return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10537  #endif
10538          /* ??? msync/mlock/munlock are broken for softmmu.  */
10539  #ifdef TARGET_NR_msync
10540      case TARGET_NR_msync:
10541          return get_errno(msync(g2h(cpu, arg1), arg2,
10542                                 target_to_host_msync_arg(arg3)));
10543  #endif
10544  #ifdef TARGET_NR_mlock
10545      case TARGET_NR_mlock:
10546          return get_errno(mlock(g2h(cpu, arg1), arg2));
10547  #endif
10548  #ifdef TARGET_NR_munlock
10549      case TARGET_NR_munlock:
10550          return get_errno(munlock(g2h(cpu, arg1), arg2));
10551  #endif
10552  #ifdef TARGET_NR_mlockall
10553      case TARGET_NR_mlockall:
10554          return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10555  #endif
10556  #ifdef TARGET_NR_munlockall
10557      case TARGET_NR_munlockall:
10558          return get_errno(munlockall());
10559  #endif
10560  #ifdef TARGET_NR_truncate
10561      case TARGET_NR_truncate:
10562          if (!(p = lock_user_string(arg1)))
10563              return -TARGET_EFAULT;
10564          ret = get_errno(truncate(p, arg2));
10565          unlock_user(p, arg1, 0);
10566          return ret;
10567  #endif
10568  #ifdef TARGET_NR_ftruncate
10569      case TARGET_NR_ftruncate:
10570          return get_errno(ftruncate(arg1, arg2));
10571  #endif
10572      case TARGET_NR_fchmod:
10573          return get_errno(fchmod(arg1, arg2));
10574  #if defined(TARGET_NR_fchmodat)
10575      case TARGET_NR_fchmodat:
10576          if (!(p = lock_user_string(arg2)))
10577              return -TARGET_EFAULT;
10578          ret = get_errno(fchmodat(arg1, p, arg3, 0));
10579          unlock_user(p, arg2, 0);
10580          return ret;
10581  #endif
10582      case TARGET_NR_getpriority:
10583          /* Note that negative values are valid for getpriority, so we must
10584             differentiate based on errno settings.  */
10585          errno = 0;
10586          ret = getpriority(arg1, arg2);
10587          if (ret == -1 && errno != 0) {
10588              return -host_to_target_errno(errno);
10589          }
10590  #ifdef TARGET_ALPHA
10591          /* Return value is the unbiased priority.  Signal no error.  */
10592          cpu_env->ir[IR_V0] = 0;
10593  #else
10594          /* Return value is a biased priority to avoid negative numbers.  */
10595          ret = 20 - ret;
10596  #endif
10597          return ret;
10598      case TARGET_NR_setpriority:
10599          return get_errno(setpriority(arg1, arg2, arg3));
10600  #ifdef TARGET_NR_statfs
10601      case TARGET_NR_statfs:
10602          if (!(p = lock_user_string(arg1))) {
10603              return -TARGET_EFAULT;
10604          }
10605          ret = get_errno(statfs(path(p), &stfs));
10606          unlock_user(p, arg1, 0);
10607      convert_statfs:
10608          if (!is_error(ret)) {
10609              struct target_statfs *target_stfs;
10610  
10611              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10612                  return -TARGET_EFAULT;
10613              __put_user(stfs.f_type, &target_stfs->f_type);
10614              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10615              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10616              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10617              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10618              __put_user(stfs.f_files, &target_stfs->f_files);
10619              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10620              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10621              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10622              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10623              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10624  #ifdef _STATFS_F_FLAGS
10625              __put_user(stfs.f_flags, &target_stfs->f_flags);
10626  #else
10627              __put_user(0, &target_stfs->f_flags);
10628  #endif
10629              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10630              unlock_user_struct(target_stfs, arg2, 1);
10631          }
10632          return ret;
10633  #endif
10634  #ifdef TARGET_NR_fstatfs
10635      case TARGET_NR_fstatfs:
10636          ret = get_errno(fstatfs(arg1, &stfs));
10637          goto convert_statfs;
10638  #endif
10639  #ifdef TARGET_NR_statfs64
10640      case TARGET_NR_statfs64:
10641          if (!(p = lock_user_string(arg1))) {
10642              return -TARGET_EFAULT;
10643          }
10644          ret = get_errno(statfs(path(p), &stfs));
10645          unlock_user(p, arg1, 0);
10646      convert_statfs64:
10647          if (!is_error(ret)) {
10648              struct target_statfs64 *target_stfs;
10649  
10650              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10651                  return -TARGET_EFAULT;
10652              __put_user(stfs.f_type, &target_stfs->f_type);
10653              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10654              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10655              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10656              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10657              __put_user(stfs.f_files, &target_stfs->f_files);
10658              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10659              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10660              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10661              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10662              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10663  #ifdef _STATFS_F_FLAGS
10664              __put_user(stfs.f_flags, &target_stfs->f_flags);
10665  #else
10666              __put_user(0, &target_stfs->f_flags);
10667  #endif
10668              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10669              unlock_user_struct(target_stfs, arg3, 1);
10670          }
10671          return ret;
10672      case TARGET_NR_fstatfs64:
10673          ret = get_errno(fstatfs(arg1, &stfs));
10674          goto convert_statfs64;
10675  #endif
10676  #ifdef TARGET_NR_socketcall
10677      case TARGET_NR_socketcall:
10678          return do_socketcall(arg1, arg2);
10679  #endif
10680  #ifdef TARGET_NR_accept
10681      case TARGET_NR_accept:
10682          return do_accept4(arg1, arg2, arg3, 0);
10683  #endif
10684  #ifdef TARGET_NR_accept4
10685      case TARGET_NR_accept4:
10686          return do_accept4(arg1, arg2, arg3, arg4);
10687  #endif
10688  #ifdef TARGET_NR_bind
10689      case TARGET_NR_bind:
10690          return do_bind(arg1, arg2, arg3);
10691  #endif
10692  #ifdef TARGET_NR_connect
10693      case TARGET_NR_connect:
10694          return do_connect(arg1, arg2, arg3);
10695  #endif
10696  #ifdef TARGET_NR_getpeername
10697      case TARGET_NR_getpeername:
10698          return do_getpeername(arg1, arg2, arg3);
10699  #endif
10700  #ifdef TARGET_NR_getsockname
10701      case TARGET_NR_getsockname:
10702          return do_getsockname(arg1, arg2, arg3);
10703  #endif
10704  #ifdef TARGET_NR_getsockopt
10705      case TARGET_NR_getsockopt:
10706          return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10707  #endif
10708  #ifdef TARGET_NR_listen
10709      case TARGET_NR_listen:
10710          return get_errno(listen(arg1, arg2));
10711  #endif
10712  #ifdef TARGET_NR_recv
10713      case TARGET_NR_recv:
10714          return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10715  #endif
10716  #ifdef TARGET_NR_recvfrom
10717      case TARGET_NR_recvfrom:
10718          return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10719  #endif
10720  #ifdef TARGET_NR_recvmsg
10721      case TARGET_NR_recvmsg:
10722          return do_sendrecvmsg(arg1, arg2, arg3, 0);
10723  #endif
10724  #ifdef TARGET_NR_send
10725      case TARGET_NR_send:
10726          return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10727  #endif
10728  #ifdef TARGET_NR_sendmsg
10729      case TARGET_NR_sendmsg:
10730          return do_sendrecvmsg(arg1, arg2, arg3, 1);
10731  #endif
10732  #ifdef TARGET_NR_sendmmsg
10733      case TARGET_NR_sendmmsg:
10734          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10735  #endif
10736  #ifdef TARGET_NR_recvmmsg
10737      case TARGET_NR_recvmmsg:
10738          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10739  #endif
10740  #ifdef TARGET_NR_sendto
10741      case TARGET_NR_sendto:
10742          return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10743  #endif
10744  #ifdef TARGET_NR_shutdown
10745      case TARGET_NR_shutdown:
10746          return get_errno(shutdown(arg1, arg2));
10747  #endif
10748  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10749      case TARGET_NR_getrandom:
10750          p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10751          if (!p) {
10752              return -TARGET_EFAULT;
10753          }
10754          ret = get_errno(getrandom(p, arg2, arg3));
10755          unlock_user(p, arg1, ret);
10756          return ret;
10757  #endif
10758  #ifdef TARGET_NR_socket
10759      case TARGET_NR_socket:
10760          return do_socket(arg1, arg2, arg3);
10761  #endif
10762  #ifdef TARGET_NR_socketpair
10763      case TARGET_NR_socketpair:
10764          return do_socketpair(arg1, arg2, arg3, arg4);
10765  #endif
10766  #ifdef TARGET_NR_setsockopt
10767      case TARGET_NR_setsockopt:
10768          return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10769  #endif
10770  #if defined(TARGET_NR_syslog)
10771      case TARGET_NR_syslog:
10772          {
10773              int len = arg2;
10774  
10775              switch (arg1) {
10776              case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10777              case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10778              case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10779              case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10780              case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10781              case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10782              case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10783              case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10784                  return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10785              case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10786              case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10787              case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10788                  {
10789                      if (len < 0) {
10790                          return -TARGET_EINVAL;
10791                      }
10792                      if (len == 0) {
10793                          return 0;
10794                      }
10795                      p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10796                      if (!p) {
10797                          return -TARGET_EFAULT;
10798                      }
10799                      ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10800                      unlock_user(p, arg2, arg3);
10801                  }
10802                  return ret;
10803              default:
10804                  return -TARGET_EINVAL;
10805              }
10806          }
10807          break;
10808  #endif
10809      case TARGET_NR_setitimer:
10810          {
10811              struct itimerval value, ovalue, *pvalue;
10812  
10813              if (arg2) {
10814                  pvalue = &value;
10815                  if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10816                      || copy_from_user_timeval(&pvalue->it_value,
10817                                                arg2 + sizeof(struct target_timeval)))
10818                      return -TARGET_EFAULT;
10819              } else {
10820                  pvalue = NULL;
10821              }
10822              ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10823              if (!is_error(ret) && arg3) {
10824                  if (copy_to_user_timeval(arg3,
10825                                           &ovalue.it_interval)
10826                      || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10827                                              &ovalue.it_value))
10828                      return -TARGET_EFAULT;
10829              }
10830          }
10831          return ret;
10832      case TARGET_NR_getitimer:
10833          {
10834              struct itimerval value;
10835  
10836              ret = get_errno(getitimer(arg1, &value));
10837              if (!is_error(ret) && arg2) {
10838                  if (copy_to_user_timeval(arg2,
10839                                           &value.it_interval)
10840                      || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10841                                              &value.it_value))
10842                      return -TARGET_EFAULT;
10843              }
10844          }
10845          return ret;
10846  #ifdef TARGET_NR_stat
10847      case TARGET_NR_stat:
10848          if (!(p = lock_user_string(arg1))) {
10849              return -TARGET_EFAULT;
10850          }
10851          ret = get_errno(stat(path(p), &st));
10852          unlock_user(p, arg1, 0);
10853          goto do_stat;
10854  #endif
10855  #ifdef TARGET_NR_lstat
10856      case TARGET_NR_lstat:
10857          if (!(p = lock_user_string(arg1))) {
10858              return -TARGET_EFAULT;
10859          }
10860          ret = get_errno(lstat(path(p), &st));
10861          unlock_user(p, arg1, 0);
10862          goto do_stat;
10863  #endif
10864  #ifdef TARGET_NR_fstat
10865      case TARGET_NR_fstat:
10866          {
10867              ret = get_errno(fstat(arg1, &st));
10868  #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10869          do_stat:
10870  #endif
10871              if (!is_error(ret)) {
10872                  struct target_stat *target_st;
10873  
10874                  if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10875                      return -TARGET_EFAULT;
10876                  memset(target_st, 0, sizeof(*target_st));
10877                  __put_user(st.st_dev, &target_st->st_dev);
10878                  __put_user(st.st_ino, &target_st->st_ino);
10879                  __put_user(st.st_mode, &target_st->st_mode);
10880                  __put_user(st.st_uid, &target_st->st_uid);
10881                  __put_user(st.st_gid, &target_st->st_gid);
10882                  __put_user(st.st_nlink, &target_st->st_nlink);
10883                  __put_user(st.st_rdev, &target_st->st_rdev);
10884                  __put_user(st.st_size, &target_st->st_size);
10885                  __put_user(st.st_blksize, &target_st->st_blksize);
10886                  __put_user(st.st_blocks, &target_st->st_blocks);
10887                  __put_user(st.st_atime, &target_st->target_st_atime);
10888                  __put_user(st.st_mtime, &target_st->target_st_mtime);
10889                  __put_user(st.st_ctime, &target_st->target_st_ctime);
10890  #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10891                  __put_user(st.st_atim.tv_nsec,
10892                             &target_st->target_st_atime_nsec);
10893                  __put_user(st.st_mtim.tv_nsec,
10894                             &target_st->target_st_mtime_nsec);
10895                  __put_user(st.st_ctim.tv_nsec,
10896                             &target_st->target_st_ctime_nsec);
10897  #endif
10898                  unlock_user_struct(target_st, arg2, 1);
10899              }
10900          }
10901          return ret;
10902  #endif
10903      case TARGET_NR_vhangup:
10904          return get_errno(vhangup());
10905  #ifdef TARGET_NR_syscall
10906      case TARGET_NR_syscall:
10907          return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10908                            arg6, arg7, arg8, 0);
10909  #endif
10910  #if defined(TARGET_NR_wait4)
10911      case TARGET_NR_wait4:
10912          {
10913              int status;
10914              abi_long status_ptr = arg2;
10915              struct rusage rusage, *rusage_ptr;
10916              abi_ulong target_rusage = arg4;
10917              abi_long rusage_err;
10918              if (target_rusage)
10919                  rusage_ptr = &rusage;
10920              else
10921                  rusage_ptr = NULL;
10922              ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10923              if (!is_error(ret)) {
10924                  if (status_ptr && ret) {
10925                      status = host_to_target_waitstatus(status);
10926                      if (put_user_s32(status, status_ptr))
10927                          return -TARGET_EFAULT;
10928                  }
10929                  if (target_rusage) {
10930                      rusage_err = host_to_target_rusage(target_rusage, &rusage);
10931                      if (rusage_err) {
10932                          ret = rusage_err;
10933                      }
10934                  }
10935              }
10936          }
10937          return ret;
10938  #endif
10939  #ifdef TARGET_NR_swapoff
10940      case TARGET_NR_swapoff:
10941          if (!(p = lock_user_string(arg1)))
10942              return -TARGET_EFAULT;
10943          ret = get_errno(swapoff(p));
10944          unlock_user(p, arg1, 0);
10945          return ret;
10946  #endif
10947      case TARGET_NR_sysinfo:
10948          {
10949              struct target_sysinfo *target_value;
10950              struct sysinfo value;
10951              ret = get_errno(sysinfo(&value));
10952              if (!is_error(ret) && arg1)
10953              {
10954                  if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10955                      return -TARGET_EFAULT;
10956                  __put_user(value.uptime, &target_value->uptime);
10957                  __put_user(value.loads[0], &target_value->loads[0]);
10958                  __put_user(value.loads[1], &target_value->loads[1]);
10959                  __put_user(value.loads[2], &target_value->loads[2]);
10960                  __put_user(value.totalram, &target_value->totalram);
10961                  __put_user(value.freeram, &target_value->freeram);
10962                  __put_user(value.sharedram, &target_value->sharedram);
10963                  __put_user(value.bufferram, &target_value->bufferram);
10964                  __put_user(value.totalswap, &target_value->totalswap);
10965                  __put_user(value.freeswap, &target_value->freeswap);
10966                  __put_user(value.procs, &target_value->procs);
10967                  __put_user(value.totalhigh, &target_value->totalhigh);
10968                  __put_user(value.freehigh, &target_value->freehigh);
10969                  __put_user(value.mem_unit, &target_value->mem_unit);
10970                  unlock_user_struct(target_value, arg1, 1);
10971              }
10972          }
10973          return ret;
10974  #ifdef TARGET_NR_ipc
10975      case TARGET_NR_ipc:
10976          return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10977  #endif
10978  #ifdef TARGET_NR_semget
10979      case TARGET_NR_semget:
10980          return get_errno(semget(arg1, arg2, arg3));
10981  #endif
10982  #ifdef TARGET_NR_semop
10983      case TARGET_NR_semop:
10984          return do_semtimedop(arg1, arg2, arg3, 0, false);
10985  #endif
10986  #ifdef TARGET_NR_semtimedop
10987      case TARGET_NR_semtimedop:
10988          return do_semtimedop(arg1, arg2, arg3, arg4, false);
10989  #endif
10990  #ifdef TARGET_NR_semtimedop_time64
10991      case TARGET_NR_semtimedop_time64:
10992          return do_semtimedop(arg1, arg2, arg3, arg4, true);
10993  #endif
10994  #ifdef TARGET_NR_semctl
10995      case TARGET_NR_semctl:
10996          return do_semctl(arg1, arg2, arg3, arg4);
10997  #endif
10998  #ifdef TARGET_NR_msgctl
10999      case TARGET_NR_msgctl:
11000          return do_msgctl(arg1, arg2, arg3);
11001  #endif
11002  #ifdef TARGET_NR_msgget
11003      case TARGET_NR_msgget:
11004          return get_errno(msgget(arg1, arg2));
11005  #endif
11006  #ifdef TARGET_NR_msgrcv
11007      case TARGET_NR_msgrcv:
11008          return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11009  #endif
11010  #ifdef TARGET_NR_msgsnd
11011      case TARGET_NR_msgsnd:
11012          return do_msgsnd(arg1, arg2, arg3, arg4);
11013  #endif
11014  #ifdef TARGET_NR_shmget
11015      case TARGET_NR_shmget:
11016          return get_errno(shmget(arg1, arg2, arg3));
11017  #endif
11018  #ifdef TARGET_NR_shmctl
11019      case TARGET_NR_shmctl:
11020          return do_shmctl(arg1, arg2, arg3);
11021  #endif
11022  #ifdef TARGET_NR_shmat
11023      case TARGET_NR_shmat:
11024          return target_shmat(cpu_env, arg1, arg2, arg3);
11025  #endif
11026  #ifdef TARGET_NR_shmdt
11027      case TARGET_NR_shmdt:
11028          return target_shmdt(arg1);
11029  #endif
11030      case TARGET_NR_fsync:
11031          return get_errno(fsync(arg1));
11032      case TARGET_NR_clone:
11033          /* Linux manages to have three different orderings for its
11034           * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11035           * match the kernel's CONFIG_CLONE_* settings.
11036           * Microblaze is further special in that it uses a sixth
11037           * implicit argument to clone for the TLS pointer.
11038           */
11039  #if defined(TARGET_MICROBLAZE)
11040          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11041  #elif defined(TARGET_CLONE_BACKWARDS)
11042          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11043  #elif defined(TARGET_CLONE_BACKWARDS2)
11044          ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11045  #else
11046          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11047  #endif
11048          return ret;
11049  #ifdef __NR_exit_group
11050          /* new thread calls */
11051      case TARGET_NR_exit_group:
11052          preexit_cleanup(cpu_env, arg1);
11053          return get_errno(exit_group(arg1));
11054  #endif
11055      case TARGET_NR_setdomainname:
11056          if (!(p = lock_user_string(arg1)))
11057              return -TARGET_EFAULT;
11058          ret = get_errno(setdomainname(p, arg2));
11059          unlock_user(p, arg1, 0);
11060          return ret;
11061      case TARGET_NR_uname:
11062          /* no need to transcode because we use the linux syscall */
11063          {
11064              struct new_utsname * buf;
11065  
11066              if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11067                  return -TARGET_EFAULT;
11068              ret = get_errno(sys_uname(buf));
11069              if (!is_error(ret)) {
11070                  /* Overwrite the native machine name with whatever is being
11071                     emulated. */
11072                  g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11073                            sizeof(buf->machine));
11074                  /* Allow the user to override the reported release.  */
11075                  if (qemu_uname_release && *qemu_uname_release) {
11076                      g_strlcpy(buf->release, qemu_uname_release,
11077                                sizeof(buf->release));
11078                  }
11079              }
11080              unlock_user_struct(buf, arg1, 1);
11081          }
11082          return ret;
11083  #ifdef TARGET_I386
11084      case TARGET_NR_modify_ldt:
11085          return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11086  #if !defined(TARGET_X86_64)
11087      case TARGET_NR_vm86:
11088          return do_vm86(cpu_env, arg1, arg2);
11089  #endif
11090  #endif
11091  #if defined(TARGET_NR_adjtimex)
11092      case TARGET_NR_adjtimex:
11093          {
11094              struct timex host_buf;
11095  
11096              if (target_to_host_timex(&host_buf, arg1) != 0) {
11097                  return -TARGET_EFAULT;
11098              }
11099              ret = get_errno(adjtimex(&host_buf));
11100              if (!is_error(ret)) {
11101                  if (host_to_target_timex(arg1, &host_buf) != 0) {
11102                      return -TARGET_EFAULT;
11103                  }
11104              }
11105          }
11106          return ret;
11107  #endif
11108  #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11109      case TARGET_NR_clock_adjtime:
11110          {
11111              struct timex htx;
11112  
11113              if (target_to_host_timex(&htx, arg2) != 0) {
11114                  return -TARGET_EFAULT;
11115              }
11116              ret = get_errno(clock_adjtime(arg1, &htx));
11117              if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11118                  return -TARGET_EFAULT;
11119              }
11120          }
11121          return ret;
11122  #endif
11123  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11124      case TARGET_NR_clock_adjtime64:
11125          {
11126              struct timex htx;
11127  
11128              if (target_to_host_timex64(&htx, arg2) != 0) {
11129                  return -TARGET_EFAULT;
11130              }
11131              ret = get_errno(clock_adjtime(arg1, &htx));
11132              if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11133                      return -TARGET_EFAULT;
11134              }
11135          }
11136          return ret;
11137  #endif
11138      case TARGET_NR_getpgid:
11139          return get_errno(getpgid(arg1));
11140      case TARGET_NR_fchdir:
11141          return get_errno(fchdir(arg1));
11142      case TARGET_NR_personality:
11143          return get_errno(personality(arg1));
11144  #ifdef TARGET_NR__llseek /* Not on alpha */
11145      case TARGET_NR__llseek:
11146          {
11147              int64_t res;
11148  #if !defined(__NR_llseek)
11149              res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11150              if (res == -1) {
11151                  ret = get_errno(res);
11152              } else {
11153                  ret = 0;
11154              }
11155  #else
11156              ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11157  #endif
11158              if ((ret == 0) && put_user_s64(res, arg4)) {
11159                  return -TARGET_EFAULT;
11160              }
11161          }
11162          return ret;
11163  #endif
11164  #ifdef TARGET_NR_getdents
11165      case TARGET_NR_getdents:
11166          return do_getdents(arg1, arg2, arg3);
11167  #endif /* TARGET_NR_getdents */
11168  #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11169      case TARGET_NR_getdents64:
11170          return do_getdents64(arg1, arg2, arg3);
11171  #endif /* TARGET_NR_getdents64 */
11172  #if defined(TARGET_NR__newselect)
11173      case TARGET_NR__newselect:
11174          return do_select(arg1, arg2, arg3, arg4, arg5);
11175  #endif
11176  #ifdef TARGET_NR_poll
11177      case TARGET_NR_poll:
11178          return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11179  #endif
11180  #ifdef TARGET_NR_ppoll
11181      case TARGET_NR_ppoll:
11182          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11183  #endif
11184  #ifdef TARGET_NR_ppoll_time64
11185      case TARGET_NR_ppoll_time64:
11186          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11187  #endif
11188      case TARGET_NR_flock:
11189          /* NOTE: the flock constant seems to be the same for every
11190             Linux platform */
11191          return get_errno(safe_flock(arg1, arg2));
11192      case TARGET_NR_readv:
11193          {
11194              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11195              if (vec != NULL) {
11196                  ret = get_errno(safe_readv(arg1, vec, arg3));
11197                  unlock_iovec(vec, arg2, arg3, 1);
11198              } else {
11199                  ret = -host_to_target_errno(errno);
11200              }
11201          }
11202          return ret;
11203      case TARGET_NR_writev:
11204          {
11205              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11206              if (vec != NULL) {
11207                  ret = get_errno(safe_writev(arg1, vec, arg3));
11208                  unlock_iovec(vec, arg2, arg3, 0);
11209              } else {
11210                  ret = -host_to_target_errno(errno);
11211              }
11212          }
11213          return ret;
11214  #if defined(TARGET_NR_preadv)
11215      case TARGET_NR_preadv:
11216          {
11217              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11218              if (vec != NULL) {
11219                  unsigned long low, high;
11220  
11221                  target_to_host_low_high(arg4, arg5, &low, &high);
11222                  ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11223                  unlock_iovec(vec, arg2, arg3, 1);
11224              } else {
11225                  ret = -host_to_target_errno(errno);
11226             }
11227          }
11228          return ret;
11229  #endif
11230  #if defined(TARGET_NR_pwritev)
11231      case TARGET_NR_pwritev:
11232          {
11233              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11234              if (vec != NULL) {
11235                  unsigned long low, high;
11236  
11237                  target_to_host_low_high(arg4, arg5, &low, &high);
11238                  ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11239                  unlock_iovec(vec, arg2, arg3, 0);
11240              } else {
11241                  ret = -host_to_target_errno(errno);
11242             }
11243          }
11244          return ret;
11245  #endif
11246      case TARGET_NR_getsid:
11247          return get_errno(getsid(arg1));
11248  #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11249      case TARGET_NR_fdatasync:
11250          return get_errno(fdatasync(arg1));
11251  #endif
11252      case TARGET_NR_sched_getaffinity:
11253          {
11254              unsigned int mask_size;
11255              unsigned long *mask;
11256  
11257              /*
11258               * sched_getaffinity needs multiples of ulong, so need to take
11259               * care of mismatches between target ulong and host ulong sizes.
11260               */
11261              if (arg2 & (sizeof(abi_ulong) - 1)) {
11262                  return -TARGET_EINVAL;
11263              }
11264              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11265  
11266              mask = alloca(mask_size);
11267              memset(mask, 0, mask_size);
11268              ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11269  
11270              if (!is_error(ret)) {
11271                  if (ret > arg2) {
11272                      /* More data returned than the caller's buffer will fit.
11273                       * This only happens if sizeof(abi_long) < sizeof(long)
11274                       * and the caller passed us a buffer holding an odd number
11275                       * of abi_longs. If the host kernel is actually using the
11276                       * extra 4 bytes then fail EINVAL; otherwise we can just
11277                       * ignore them and only copy the interesting part.
11278                       */
11279                      int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11280                      if (numcpus > arg2 * 8) {
11281                          return -TARGET_EINVAL;
11282                      }
11283                      ret = arg2;
11284                  }
11285  
11286                  if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11287                      return -TARGET_EFAULT;
11288                  }
11289              }
11290          }
11291          return ret;
11292      case TARGET_NR_sched_setaffinity:
11293          {
11294              unsigned int mask_size;
11295              unsigned long *mask;
11296  
11297              /*
11298               * sched_setaffinity needs multiples of ulong, so need to take
11299               * care of mismatches between target ulong and host ulong sizes.
11300               */
11301              if (arg2 & (sizeof(abi_ulong) - 1)) {
11302                  return -TARGET_EINVAL;
11303              }
11304              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11305              mask = alloca(mask_size);
11306  
11307              ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11308              if (ret) {
11309                  return ret;
11310              }
11311  
11312              return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11313          }
11314      case TARGET_NR_getcpu:
11315          {
11316              unsigned cpuid, node;
11317              ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11318                                         arg2 ? &node : NULL,
11319                                         NULL));
11320              if (is_error(ret)) {
11321                  return ret;
11322              }
11323              if (arg1 && put_user_u32(cpuid, arg1)) {
11324                  return -TARGET_EFAULT;
11325              }
11326              if (arg2 && put_user_u32(node, arg2)) {
11327                  return -TARGET_EFAULT;
11328              }
11329          }
11330          return ret;
11331      case TARGET_NR_sched_setparam:
11332          {
11333              struct target_sched_param *target_schp;
11334              struct sched_param schp;
11335  
11336              if (arg2 == 0) {
11337                  return -TARGET_EINVAL;
11338              }
11339              if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11340                  return -TARGET_EFAULT;
11341              }
11342              schp.sched_priority = tswap32(target_schp->sched_priority);
11343              unlock_user_struct(target_schp, arg2, 0);
11344              return get_errno(sys_sched_setparam(arg1, &schp));
11345          }
11346      case TARGET_NR_sched_getparam:
11347          {
11348              struct target_sched_param *target_schp;
11349              struct sched_param schp;
11350  
11351              if (arg2 == 0) {
11352                  return -TARGET_EINVAL;
11353              }
11354              ret = get_errno(sys_sched_getparam(arg1, &schp));
11355              if (!is_error(ret)) {
11356                  if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11357                      return -TARGET_EFAULT;
11358                  }
11359                  target_schp->sched_priority = tswap32(schp.sched_priority);
11360                  unlock_user_struct(target_schp, arg2, 1);
11361              }
11362          }
11363          return ret;
11364      case TARGET_NR_sched_setscheduler:
11365          {
11366              struct target_sched_param *target_schp;
11367              struct sched_param schp;
11368              if (arg3 == 0) {
11369                  return -TARGET_EINVAL;
11370              }
11371              if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11372                  return -TARGET_EFAULT;
11373              }
11374              schp.sched_priority = tswap32(target_schp->sched_priority);
11375              unlock_user_struct(target_schp, arg3, 0);
11376              return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11377          }
11378      case TARGET_NR_sched_getscheduler:
11379          return get_errno(sys_sched_getscheduler(arg1));
11380      case TARGET_NR_sched_getattr:
11381          {
11382              struct target_sched_attr *target_scha;
11383              struct sched_attr scha;
11384              if (arg2 == 0) {
11385                  return -TARGET_EINVAL;
11386              }
11387              if (arg3 > sizeof(scha)) {
11388                  arg3 = sizeof(scha);
11389              }
11390              ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11391              if (!is_error(ret)) {
11392                  target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11393                  if (!target_scha) {
11394                      return -TARGET_EFAULT;
11395                  }
11396                  target_scha->size = tswap32(scha.size);
11397                  target_scha->sched_policy = tswap32(scha.sched_policy);
11398                  target_scha->sched_flags = tswap64(scha.sched_flags);
11399                  target_scha->sched_nice = tswap32(scha.sched_nice);
11400                  target_scha->sched_priority = tswap32(scha.sched_priority);
11401                  target_scha->sched_runtime = tswap64(scha.sched_runtime);
11402                  target_scha->sched_deadline = tswap64(scha.sched_deadline);
11403                  target_scha->sched_period = tswap64(scha.sched_period);
11404                  if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11405                      target_scha->sched_util_min = tswap32(scha.sched_util_min);
11406                      target_scha->sched_util_max = tswap32(scha.sched_util_max);
11407                  }
11408                  unlock_user(target_scha, arg2, arg3);
11409              }
11410              return ret;
11411          }
11412      case TARGET_NR_sched_setattr:
11413          {
11414              struct target_sched_attr *target_scha;
11415              struct sched_attr scha;
11416              uint32_t size;
11417              int zeroed;
11418              if (arg2 == 0) {
11419                  return -TARGET_EINVAL;
11420              }
11421              if (get_user_u32(size, arg2)) {
11422                  return -TARGET_EFAULT;
11423              }
11424              if (!size) {
11425                  size = offsetof(struct target_sched_attr, sched_util_min);
11426              }
11427              if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11428                  if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11429                      return -TARGET_EFAULT;
11430                  }
11431                  return -TARGET_E2BIG;
11432              }
11433  
11434              zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11435              if (zeroed < 0) {
11436                  return zeroed;
11437              } else if (zeroed == 0) {
11438                  if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11439                      return -TARGET_EFAULT;
11440                  }
11441                  return -TARGET_E2BIG;
11442              }
11443              if (size > sizeof(struct target_sched_attr)) {
11444                  size = sizeof(struct target_sched_attr);
11445              }
11446  
11447              target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11448              if (!target_scha) {
11449                  return -TARGET_EFAULT;
11450              }
11451              scha.size = size;
11452              scha.sched_policy = tswap32(target_scha->sched_policy);
11453              scha.sched_flags = tswap64(target_scha->sched_flags);
11454              scha.sched_nice = tswap32(target_scha->sched_nice);
11455              scha.sched_priority = tswap32(target_scha->sched_priority);
11456              scha.sched_runtime = tswap64(target_scha->sched_runtime);
11457              scha.sched_deadline = tswap64(target_scha->sched_deadline);
11458              scha.sched_period = tswap64(target_scha->sched_period);
11459              if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11460                  scha.sched_util_min = tswap32(target_scha->sched_util_min);
11461                  scha.sched_util_max = tswap32(target_scha->sched_util_max);
11462              }
11463              unlock_user(target_scha, arg2, 0);
11464              return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11465          }
11466      case TARGET_NR_sched_yield:
11467          return get_errno(sched_yield());
11468      case TARGET_NR_sched_get_priority_max:
11469          return get_errno(sched_get_priority_max(arg1));
11470      case TARGET_NR_sched_get_priority_min:
11471          return get_errno(sched_get_priority_min(arg1));
11472  #ifdef TARGET_NR_sched_rr_get_interval
11473      case TARGET_NR_sched_rr_get_interval:
11474          {
11475              struct timespec ts;
11476              ret = get_errno(sched_rr_get_interval(arg1, &ts));
11477              if (!is_error(ret)) {
11478                  ret = host_to_target_timespec(arg2, &ts);
11479              }
11480          }
11481          return ret;
11482  #endif
11483  #ifdef TARGET_NR_sched_rr_get_interval_time64
11484      case TARGET_NR_sched_rr_get_interval_time64:
11485          {
11486              struct timespec ts;
11487              ret = get_errno(sched_rr_get_interval(arg1, &ts));
11488              if (!is_error(ret)) {
11489                  ret = host_to_target_timespec64(arg2, &ts);
11490              }
11491          }
11492          return ret;
11493  #endif
11494  #if defined(TARGET_NR_nanosleep)
11495      case TARGET_NR_nanosleep:
11496          {
11497              struct timespec req, rem;
11498              target_to_host_timespec(&req, arg1);
11499              ret = get_errno(safe_nanosleep(&req, &rem));
11500              if (is_error(ret) && arg2) {
11501                  host_to_target_timespec(arg2, &rem);
11502              }
11503          }
11504          return ret;
11505  #endif
11506      case TARGET_NR_prctl:
11507          return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11508          break;
11509  #ifdef TARGET_NR_arch_prctl
11510      case TARGET_NR_arch_prctl:
11511          return do_arch_prctl(cpu_env, arg1, arg2);
11512  #endif
11513  #ifdef TARGET_NR_pread64
11514      case TARGET_NR_pread64:
11515          if (regpairs_aligned(cpu_env, num)) {
11516              arg4 = arg5;
11517              arg5 = arg6;
11518          }
11519          if (arg2 == 0 && arg3 == 0) {
11520              /* Special-case NULL buffer and zero length, which should succeed */
11521              p = 0;
11522          } else {
11523              p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11524              if (!p) {
11525                  return -TARGET_EFAULT;
11526              }
11527          }
11528          ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11529          unlock_user(p, arg2, ret);
11530          return ret;
11531      case TARGET_NR_pwrite64:
11532          if (regpairs_aligned(cpu_env, num)) {
11533              arg4 = arg5;
11534              arg5 = arg6;
11535          }
11536          if (arg2 == 0 && arg3 == 0) {
11537              /* Special-case NULL buffer and zero length, which should succeed */
11538              p = 0;
11539          } else {
11540              p = lock_user(VERIFY_READ, arg2, arg3, 1);
11541              if (!p) {
11542                  return -TARGET_EFAULT;
11543              }
11544          }
11545          ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11546          unlock_user(p, arg2, 0);
11547          return ret;
11548  #endif
11549      case TARGET_NR_getcwd:
11550          if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11551              return -TARGET_EFAULT;
11552          ret = get_errno(sys_getcwd1(p, arg2));
11553          unlock_user(p, arg1, ret);
11554          return ret;
11555      case TARGET_NR_capget:
11556      case TARGET_NR_capset:
11557      {
11558          struct target_user_cap_header *target_header;
11559          struct target_user_cap_data *target_data = NULL;
11560          struct __user_cap_header_struct header;
11561          struct __user_cap_data_struct data[2];
11562          struct __user_cap_data_struct *dataptr = NULL;
11563          int i, target_datalen;
11564          int data_items = 1;
11565  
11566          if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11567              return -TARGET_EFAULT;
11568          }
11569          header.version = tswap32(target_header->version);
11570          header.pid = tswap32(target_header->pid);
11571  
11572          if (header.version != _LINUX_CAPABILITY_VERSION) {
11573              /* Version 2 and up takes pointer to two user_data structs */
11574              data_items = 2;
11575          }
11576  
11577          target_datalen = sizeof(*target_data) * data_items;
11578  
11579          if (arg2) {
11580              if (num == TARGET_NR_capget) {
11581                  target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11582              } else {
11583                  target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11584              }
11585              if (!target_data) {
11586                  unlock_user_struct(target_header, arg1, 0);
11587                  return -TARGET_EFAULT;
11588              }
11589  
11590              if (num == TARGET_NR_capset) {
11591                  for (i = 0; i < data_items; i++) {
11592                      data[i].effective = tswap32(target_data[i].effective);
11593                      data[i].permitted = tswap32(target_data[i].permitted);
11594                      data[i].inheritable = tswap32(target_data[i].inheritable);
11595                  }
11596              }
11597  
11598              dataptr = data;
11599          }
11600  
11601          if (num == TARGET_NR_capget) {
11602              ret = get_errno(capget(&header, dataptr));
11603          } else {
11604              ret = get_errno(capset(&header, dataptr));
11605          }
11606  
11607          /* The kernel always updates version for both capget and capset */
11608          target_header->version = tswap32(header.version);
11609          unlock_user_struct(target_header, arg1, 1);
11610  
11611          if (arg2) {
11612              if (num == TARGET_NR_capget) {
11613                  for (i = 0; i < data_items; i++) {
11614                      target_data[i].effective = tswap32(data[i].effective);
11615                      target_data[i].permitted = tswap32(data[i].permitted);
11616                      target_data[i].inheritable = tswap32(data[i].inheritable);
11617                  }
11618                  unlock_user(target_data, arg2, target_datalen);
11619              } else {
11620                  unlock_user(target_data, arg2, 0);
11621              }
11622          }
11623          return ret;
11624      }
11625      case TARGET_NR_sigaltstack:
11626          return do_sigaltstack(arg1, arg2, cpu_env);
11627  
11628  #ifdef CONFIG_SENDFILE
11629  #ifdef TARGET_NR_sendfile
11630      case TARGET_NR_sendfile:
11631      {
11632          off_t *offp = NULL;
11633          off_t off;
11634          if (arg3) {
11635              ret = get_user_sal(off, arg3);
11636              if (is_error(ret)) {
11637                  return ret;
11638              }
11639              offp = &off;
11640          }
11641          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11642          if (!is_error(ret) && arg3) {
11643              abi_long ret2 = put_user_sal(off, arg3);
11644              if (is_error(ret2)) {
11645                  ret = ret2;
11646              }
11647          }
11648          return ret;
11649      }
11650  #endif
11651  #ifdef TARGET_NR_sendfile64
11652      case TARGET_NR_sendfile64:
11653      {
11654          off_t *offp = NULL;
11655          off_t off;
11656          if (arg3) {
11657              ret = get_user_s64(off, arg3);
11658              if (is_error(ret)) {
11659                  return ret;
11660              }
11661              offp = &off;
11662          }
11663          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11664          if (!is_error(ret) && arg3) {
11665              abi_long ret2 = put_user_s64(off, arg3);
11666              if (is_error(ret2)) {
11667                  ret = ret2;
11668              }
11669          }
11670          return ret;
11671      }
11672  #endif
11673  #endif
11674  #ifdef TARGET_NR_vfork
11675      case TARGET_NR_vfork:
11676          return get_errno(do_fork(cpu_env,
11677                           CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11678                           0, 0, 0, 0));
11679  #endif
11680  #ifdef TARGET_NR_ugetrlimit
11681      case TARGET_NR_ugetrlimit:
11682      {
11683  	struct rlimit rlim;
11684  	int resource = target_to_host_resource(arg1);
11685  	ret = get_errno(getrlimit(resource, &rlim));
11686  	if (!is_error(ret)) {
11687  	    struct target_rlimit *target_rlim;
11688              if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11689                  return -TARGET_EFAULT;
11690  	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11691  	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11692              unlock_user_struct(target_rlim, arg2, 1);
11693  	}
11694          return ret;
11695      }
11696  #endif
11697  #ifdef TARGET_NR_truncate64
11698      case TARGET_NR_truncate64:
11699          if (!(p = lock_user_string(arg1)))
11700              return -TARGET_EFAULT;
11701  	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11702          unlock_user(p, arg1, 0);
11703          return ret;
11704  #endif
11705  #ifdef TARGET_NR_ftruncate64
11706      case TARGET_NR_ftruncate64:
11707          return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11708  #endif
11709  #ifdef TARGET_NR_stat64
11710      case TARGET_NR_stat64:
11711          if (!(p = lock_user_string(arg1))) {
11712              return -TARGET_EFAULT;
11713          }
11714          ret = get_errno(stat(path(p), &st));
11715          unlock_user(p, arg1, 0);
11716          if (!is_error(ret))
11717              ret = host_to_target_stat64(cpu_env, arg2, &st);
11718          return ret;
11719  #endif
11720  #ifdef TARGET_NR_lstat64
11721      case TARGET_NR_lstat64:
11722          if (!(p = lock_user_string(arg1))) {
11723              return -TARGET_EFAULT;
11724          }
11725          ret = get_errno(lstat(path(p), &st));
11726          unlock_user(p, arg1, 0);
11727          if (!is_error(ret))
11728              ret = host_to_target_stat64(cpu_env, arg2, &st);
11729          return ret;
11730  #endif
11731  #ifdef TARGET_NR_fstat64
11732      case TARGET_NR_fstat64:
11733          ret = get_errno(fstat(arg1, &st));
11734          if (!is_error(ret))
11735              ret = host_to_target_stat64(cpu_env, arg2, &st);
11736          return ret;
11737  #endif
11738  #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11739  #ifdef TARGET_NR_fstatat64
11740      case TARGET_NR_fstatat64:
11741  #endif
11742  #ifdef TARGET_NR_newfstatat
11743      case TARGET_NR_newfstatat:
11744  #endif
11745          if (!(p = lock_user_string(arg2))) {
11746              return -TARGET_EFAULT;
11747          }
11748          ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11749          unlock_user(p, arg2, 0);
11750          if (!is_error(ret))
11751              ret = host_to_target_stat64(cpu_env, arg3, &st);
11752          return ret;
11753  #endif
11754  #if defined(TARGET_NR_statx)
11755      case TARGET_NR_statx:
11756          {
11757              struct target_statx *target_stx;
11758              int dirfd = arg1;
11759              int flags = arg3;
11760  
11761              p = lock_user_string(arg2);
11762              if (p == NULL) {
11763                  return -TARGET_EFAULT;
11764              }
11765  #if defined(__NR_statx)
11766              {
11767                  /*
11768                   * It is assumed that struct statx is architecture independent.
11769                   */
11770                  struct target_statx host_stx;
11771                  int mask = arg4;
11772  
11773                  ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11774                  if (!is_error(ret)) {
11775                      if (host_to_target_statx(&host_stx, arg5) != 0) {
11776                          unlock_user(p, arg2, 0);
11777                          return -TARGET_EFAULT;
11778                      }
11779                  }
11780  
11781                  if (ret != -TARGET_ENOSYS) {
11782                      unlock_user(p, arg2, 0);
11783                      return ret;
11784                  }
11785              }
11786  #endif
11787              ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11788              unlock_user(p, arg2, 0);
11789  
11790              if (!is_error(ret)) {
11791                  if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11792                      return -TARGET_EFAULT;
11793                  }
11794                  memset(target_stx, 0, sizeof(*target_stx));
11795                  __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11796                  __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11797                  __put_user(st.st_ino, &target_stx->stx_ino);
11798                  __put_user(st.st_mode, &target_stx->stx_mode);
11799                  __put_user(st.st_uid, &target_stx->stx_uid);
11800                  __put_user(st.st_gid, &target_stx->stx_gid);
11801                  __put_user(st.st_nlink, &target_stx->stx_nlink);
11802                  __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11803                  __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11804                  __put_user(st.st_size, &target_stx->stx_size);
11805                  __put_user(st.st_blksize, &target_stx->stx_blksize);
11806                  __put_user(st.st_blocks, &target_stx->stx_blocks);
11807                  __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11808                  __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11809                  __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11810                  unlock_user_struct(target_stx, arg5, 1);
11811              }
11812          }
11813          return ret;
11814  #endif
11815  #ifdef TARGET_NR_lchown
11816      case TARGET_NR_lchown:
11817          if (!(p = lock_user_string(arg1)))
11818              return -TARGET_EFAULT;
11819          ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11820          unlock_user(p, arg1, 0);
11821          return ret;
11822  #endif
11823  #ifdef TARGET_NR_getuid
11824      case TARGET_NR_getuid:
11825          return get_errno(high2lowuid(getuid()));
11826  #endif
11827  #ifdef TARGET_NR_getgid
11828      case TARGET_NR_getgid:
11829          return get_errno(high2lowgid(getgid()));
11830  #endif
11831  #ifdef TARGET_NR_geteuid
11832      case TARGET_NR_geteuid:
11833          return get_errno(high2lowuid(geteuid()));
11834  #endif
11835  #ifdef TARGET_NR_getegid
11836      case TARGET_NR_getegid:
11837          return get_errno(high2lowgid(getegid()));
11838  #endif
11839      case TARGET_NR_setreuid:
11840          return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11841      case TARGET_NR_setregid:
11842          return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11843      case TARGET_NR_getgroups:
11844          { /* the same code as for TARGET_NR_getgroups32 */
11845              int gidsetsize = arg1;
11846              target_id *target_grouplist;
11847              g_autofree gid_t *grouplist = NULL;
11848              int i;
11849  
11850              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11851                  return -TARGET_EINVAL;
11852              }
11853              if (gidsetsize > 0) {
11854                  grouplist = g_try_new(gid_t, gidsetsize);
11855                  if (!grouplist) {
11856                      return -TARGET_ENOMEM;
11857                  }
11858              }
11859              ret = get_errno(getgroups(gidsetsize, grouplist));
11860              if (!is_error(ret) && gidsetsize > 0) {
11861                  target_grouplist = lock_user(VERIFY_WRITE, arg2,
11862                                               gidsetsize * sizeof(target_id), 0);
11863                  if (!target_grouplist) {
11864                      return -TARGET_EFAULT;
11865                  }
11866                  for (i = 0; i < ret; i++) {
11867                      target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11868                  }
11869                  unlock_user(target_grouplist, arg2,
11870                              gidsetsize * sizeof(target_id));
11871              }
11872              return ret;
11873          }
11874      case TARGET_NR_setgroups:
11875          { /* the same code as for TARGET_NR_setgroups32 */
11876              int gidsetsize = arg1;
11877              target_id *target_grouplist;
11878              g_autofree gid_t *grouplist = NULL;
11879              int i;
11880  
11881              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11882                  return -TARGET_EINVAL;
11883              }
11884              if (gidsetsize > 0) {
11885                  grouplist = g_try_new(gid_t, gidsetsize);
11886                  if (!grouplist) {
11887                      return -TARGET_ENOMEM;
11888                  }
11889                  target_grouplist = lock_user(VERIFY_READ, arg2,
11890                                               gidsetsize * sizeof(target_id), 1);
11891                  if (!target_grouplist) {
11892                      return -TARGET_EFAULT;
11893                  }
11894                  for (i = 0; i < gidsetsize; i++) {
11895                      grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11896                  }
11897                  unlock_user(target_grouplist, arg2,
11898                              gidsetsize * sizeof(target_id));
11899              }
11900              return get_errno(setgroups(gidsetsize, grouplist));
11901          }
11902      case TARGET_NR_fchown:
11903          return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11904  #if defined(TARGET_NR_fchownat)
11905      case TARGET_NR_fchownat:
11906          if (!(p = lock_user_string(arg2)))
11907              return -TARGET_EFAULT;
11908          ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11909                                   low2highgid(arg4), arg5));
11910          unlock_user(p, arg2, 0);
11911          return ret;
11912  #endif
11913  #ifdef TARGET_NR_setresuid
11914      case TARGET_NR_setresuid:
11915          return get_errno(sys_setresuid(low2highuid(arg1),
11916                                         low2highuid(arg2),
11917                                         low2highuid(arg3)));
11918  #endif
11919  #ifdef TARGET_NR_getresuid
11920      case TARGET_NR_getresuid:
11921          {
11922              uid_t ruid, euid, suid;
11923              ret = get_errno(getresuid(&ruid, &euid, &suid));
11924              if (!is_error(ret)) {
11925                  if (put_user_id(high2lowuid(ruid), arg1)
11926                      || put_user_id(high2lowuid(euid), arg2)
11927                      || put_user_id(high2lowuid(suid), arg3))
11928                      return -TARGET_EFAULT;
11929              }
11930          }
11931          return ret;
11932  #endif
11933  #ifdef TARGET_NR_getresgid
11934      case TARGET_NR_setresgid:
11935          return get_errno(sys_setresgid(low2highgid(arg1),
11936                                         low2highgid(arg2),
11937                                         low2highgid(arg3)));
11938  #endif
11939  #ifdef TARGET_NR_getresgid
11940      case TARGET_NR_getresgid:
11941          {
11942              gid_t rgid, egid, sgid;
11943              ret = get_errno(getresgid(&rgid, &egid, &sgid));
11944              if (!is_error(ret)) {
11945                  if (put_user_id(high2lowgid(rgid), arg1)
11946                      || put_user_id(high2lowgid(egid), arg2)
11947                      || put_user_id(high2lowgid(sgid), arg3))
11948                      return -TARGET_EFAULT;
11949              }
11950          }
11951          return ret;
11952  #endif
11953  #ifdef TARGET_NR_chown
11954      case TARGET_NR_chown:
11955          if (!(p = lock_user_string(arg1)))
11956              return -TARGET_EFAULT;
11957          ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11958          unlock_user(p, arg1, 0);
11959          return ret;
11960  #endif
11961      case TARGET_NR_setuid:
11962          return get_errno(sys_setuid(low2highuid(arg1)));
11963      case TARGET_NR_setgid:
11964          return get_errno(sys_setgid(low2highgid(arg1)));
11965      case TARGET_NR_setfsuid:
11966          return get_errno(setfsuid(arg1));
11967      case TARGET_NR_setfsgid:
11968          return get_errno(setfsgid(arg1));
11969  
11970  #ifdef TARGET_NR_lchown32
11971      case TARGET_NR_lchown32:
11972          if (!(p = lock_user_string(arg1)))
11973              return -TARGET_EFAULT;
11974          ret = get_errno(lchown(p, arg2, arg3));
11975          unlock_user(p, arg1, 0);
11976          return ret;
11977  #endif
11978  #ifdef TARGET_NR_getuid32
11979      case TARGET_NR_getuid32:
11980          return get_errno(getuid());
11981  #endif
11982  
11983  #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11984     /* Alpha specific */
11985      case TARGET_NR_getxuid:
11986           {
11987              uid_t euid;
11988              euid=geteuid();
11989              cpu_env->ir[IR_A4]=euid;
11990           }
11991          return get_errno(getuid());
11992  #endif
11993  #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11994     /* Alpha specific */
11995      case TARGET_NR_getxgid:
11996           {
11997              uid_t egid;
11998              egid=getegid();
11999              cpu_env->ir[IR_A4]=egid;
12000           }
12001          return get_errno(getgid());
12002  #endif
12003  #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12004      /* Alpha specific */
12005      case TARGET_NR_osf_getsysinfo:
12006          ret = -TARGET_EOPNOTSUPP;
12007          switch (arg1) {
12008            case TARGET_GSI_IEEE_FP_CONTROL:
12009              {
12010                  uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12011                  uint64_t swcr = cpu_env->swcr;
12012  
12013                  swcr &= ~SWCR_STATUS_MASK;
12014                  swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12015  
12016                  if (put_user_u64 (swcr, arg2))
12017                          return -TARGET_EFAULT;
12018                  ret = 0;
12019              }
12020              break;
12021  
12022            /* case GSI_IEEE_STATE_AT_SIGNAL:
12023               -- Not implemented in linux kernel.
12024               case GSI_UACPROC:
12025               -- Retrieves current unaligned access state; not much used.
12026               case GSI_PROC_TYPE:
12027               -- Retrieves implver information; surely not used.
12028               case GSI_GET_HWRPB:
12029               -- Grabs a copy of the HWRPB; surely not used.
12030            */
12031          }
12032          return ret;
12033  #endif
12034  #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12035      /* Alpha specific */
12036      case TARGET_NR_osf_setsysinfo:
12037          ret = -TARGET_EOPNOTSUPP;
12038          switch (arg1) {
12039            case TARGET_SSI_IEEE_FP_CONTROL:
12040              {
12041                  uint64_t swcr, fpcr;
12042  
12043                  if (get_user_u64 (swcr, arg2)) {
12044                      return -TARGET_EFAULT;
12045                  }
12046  
12047                  /*
12048                   * The kernel calls swcr_update_status to update the
12049                   * status bits from the fpcr at every point that it
12050                   * could be queried.  Therefore, we store the status
12051                   * bits only in FPCR.
12052                   */
12053                  cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12054  
12055                  fpcr = cpu_alpha_load_fpcr(cpu_env);
12056                  fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12057                  fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12058                  cpu_alpha_store_fpcr(cpu_env, fpcr);
12059                  ret = 0;
12060              }
12061              break;
12062  
12063            case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12064              {
12065                  uint64_t exc, fpcr, fex;
12066  
12067                  if (get_user_u64(exc, arg2)) {
12068                      return -TARGET_EFAULT;
12069                  }
12070                  exc &= SWCR_STATUS_MASK;
12071                  fpcr = cpu_alpha_load_fpcr(cpu_env);
12072  
12073                  /* Old exceptions are not signaled.  */
12074                  fex = alpha_ieee_fpcr_to_swcr(fpcr);
12075                  fex = exc & ~fex;
12076                  fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12077                  fex &= (cpu_env)->swcr;
12078  
12079                  /* Update the hardware fpcr.  */
12080                  fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12081                  cpu_alpha_store_fpcr(cpu_env, fpcr);
12082  
12083                  if (fex) {
12084                      int si_code = TARGET_FPE_FLTUNK;
12085                      target_siginfo_t info;
12086  
12087                      if (fex & SWCR_TRAP_ENABLE_DNO) {
12088                          si_code = TARGET_FPE_FLTUND;
12089                      }
12090                      if (fex & SWCR_TRAP_ENABLE_INE) {
12091                          si_code = TARGET_FPE_FLTRES;
12092                      }
12093                      if (fex & SWCR_TRAP_ENABLE_UNF) {
12094                          si_code = TARGET_FPE_FLTUND;
12095                      }
12096                      if (fex & SWCR_TRAP_ENABLE_OVF) {
12097                          si_code = TARGET_FPE_FLTOVF;
12098                      }
12099                      if (fex & SWCR_TRAP_ENABLE_DZE) {
12100                          si_code = TARGET_FPE_FLTDIV;
12101                      }
12102                      if (fex & SWCR_TRAP_ENABLE_INV) {
12103                          si_code = TARGET_FPE_FLTINV;
12104                      }
12105  
12106                      info.si_signo = SIGFPE;
12107                      info.si_errno = 0;
12108                      info.si_code = si_code;
12109                      info._sifields._sigfault._addr = (cpu_env)->pc;
12110                      queue_signal(cpu_env, info.si_signo,
12111                                   QEMU_SI_FAULT, &info);
12112                  }
12113                  ret = 0;
12114              }
12115              break;
12116  
12117            /* case SSI_NVPAIRS:
12118               -- Used with SSIN_UACPROC to enable unaligned accesses.
12119               case SSI_IEEE_STATE_AT_SIGNAL:
12120               case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12121               -- Not implemented in linux kernel
12122            */
12123          }
12124          return ret;
12125  #endif
12126  #ifdef TARGET_NR_osf_sigprocmask
12127      /* Alpha specific.  */
12128      case TARGET_NR_osf_sigprocmask:
12129          {
12130              abi_ulong mask;
12131              int how;
12132              sigset_t set, oldset;
12133  
12134              switch(arg1) {
12135              case TARGET_SIG_BLOCK:
12136                  how = SIG_BLOCK;
12137                  break;
12138              case TARGET_SIG_UNBLOCK:
12139                  how = SIG_UNBLOCK;
12140                  break;
12141              case TARGET_SIG_SETMASK:
12142                  how = SIG_SETMASK;
12143                  break;
12144              default:
12145                  return -TARGET_EINVAL;
12146              }
12147              mask = arg2;
12148              target_to_host_old_sigset(&set, &mask);
12149              ret = do_sigprocmask(how, &set, &oldset);
12150              if (!ret) {
12151                  host_to_target_old_sigset(&mask, &oldset);
12152                  ret = mask;
12153              }
12154          }
12155          return ret;
12156  #endif
12157  
12158  #ifdef TARGET_NR_getgid32
12159      case TARGET_NR_getgid32:
12160          return get_errno(getgid());
12161  #endif
12162  #ifdef TARGET_NR_geteuid32
12163      case TARGET_NR_geteuid32:
12164          return get_errno(geteuid());
12165  #endif
12166  #ifdef TARGET_NR_getegid32
12167      case TARGET_NR_getegid32:
12168          return get_errno(getegid());
12169  #endif
12170  #ifdef TARGET_NR_setreuid32
12171      case TARGET_NR_setreuid32:
12172          return get_errno(setreuid(arg1, arg2));
12173  #endif
12174  #ifdef TARGET_NR_setregid32
12175      case TARGET_NR_setregid32:
12176          return get_errno(setregid(arg1, arg2));
12177  #endif
12178  #ifdef TARGET_NR_getgroups32
12179      case TARGET_NR_getgroups32:
12180          { /* the same code as for TARGET_NR_getgroups */
12181              int gidsetsize = arg1;
12182              uint32_t *target_grouplist;
12183              g_autofree gid_t *grouplist = NULL;
12184              int i;
12185  
12186              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12187                  return -TARGET_EINVAL;
12188              }
12189              if (gidsetsize > 0) {
12190                  grouplist = g_try_new(gid_t, gidsetsize);
12191                  if (!grouplist) {
12192                      return -TARGET_ENOMEM;
12193                  }
12194              }
12195              ret = get_errno(getgroups(gidsetsize, grouplist));
12196              if (!is_error(ret) && gidsetsize > 0) {
12197                  target_grouplist = lock_user(VERIFY_WRITE, arg2,
12198                                               gidsetsize * 4, 0);
12199                  if (!target_grouplist) {
12200                      return -TARGET_EFAULT;
12201                  }
12202                  for (i = 0; i < ret; i++) {
12203                      target_grouplist[i] = tswap32(grouplist[i]);
12204                  }
12205                  unlock_user(target_grouplist, arg2, gidsetsize * 4);
12206              }
12207              return ret;
12208          }
12209  #endif
12210  #ifdef TARGET_NR_setgroups32
12211      case TARGET_NR_setgroups32:
12212          { /* the same code as for TARGET_NR_setgroups */
12213              int gidsetsize = arg1;
12214              uint32_t *target_grouplist;
12215              g_autofree gid_t *grouplist = NULL;
12216              int i;
12217  
12218              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12219                  return -TARGET_EINVAL;
12220              }
12221              if (gidsetsize > 0) {
12222                  grouplist = g_try_new(gid_t, gidsetsize);
12223                  if (!grouplist) {
12224                      return -TARGET_ENOMEM;
12225                  }
12226                  target_grouplist = lock_user(VERIFY_READ, arg2,
12227                                               gidsetsize * 4, 1);
12228                  if (!target_grouplist) {
12229                      return -TARGET_EFAULT;
12230                  }
12231                  for (i = 0; i < gidsetsize; i++) {
12232                      grouplist[i] = tswap32(target_grouplist[i]);
12233                  }
12234                  unlock_user(target_grouplist, arg2, 0);
12235              }
12236              return get_errno(setgroups(gidsetsize, grouplist));
12237          }
12238  #endif
12239  #ifdef TARGET_NR_fchown32
12240      case TARGET_NR_fchown32:
12241          return get_errno(fchown(arg1, arg2, arg3));
12242  #endif
12243  #ifdef TARGET_NR_setresuid32
12244      case TARGET_NR_setresuid32:
12245          return get_errno(sys_setresuid(arg1, arg2, arg3));
12246  #endif
12247  #ifdef TARGET_NR_getresuid32
12248      case TARGET_NR_getresuid32:
12249          {
12250              uid_t ruid, euid, suid;
12251              ret = get_errno(getresuid(&ruid, &euid, &suid));
12252              if (!is_error(ret)) {
12253                  if (put_user_u32(ruid, arg1)
12254                      || put_user_u32(euid, arg2)
12255                      || put_user_u32(suid, arg3))
12256                      return -TARGET_EFAULT;
12257              }
12258          }
12259          return ret;
12260  #endif
12261  #ifdef TARGET_NR_setresgid32
12262      case TARGET_NR_setresgid32:
12263          return get_errno(sys_setresgid(arg1, arg2, arg3));
12264  #endif
12265  #ifdef TARGET_NR_getresgid32
12266      case TARGET_NR_getresgid32:
12267          {
12268              gid_t rgid, egid, sgid;
12269              ret = get_errno(getresgid(&rgid, &egid, &sgid));
12270              if (!is_error(ret)) {
12271                  if (put_user_u32(rgid, arg1)
12272                      || put_user_u32(egid, arg2)
12273                      || put_user_u32(sgid, arg3))
12274                      return -TARGET_EFAULT;
12275              }
12276          }
12277          return ret;
12278  #endif
12279  #ifdef TARGET_NR_chown32
12280      case TARGET_NR_chown32:
12281          if (!(p = lock_user_string(arg1)))
12282              return -TARGET_EFAULT;
12283          ret = get_errno(chown(p, arg2, arg3));
12284          unlock_user(p, arg1, 0);
12285          return ret;
12286  #endif
12287  #ifdef TARGET_NR_setuid32
12288      case TARGET_NR_setuid32:
12289          return get_errno(sys_setuid(arg1));
12290  #endif
12291  #ifdef TARGET_NR_setgid32
12292      case TARGET_NR_setgid32:
12293          return get_errno(sys_setgid(arg1));
12294  #endif
12295  #ifdef TARGET_NR_setfsuid32
12296      case TARGET_NR_setfsuid32:
12297          return get_errno(setfsuid(arg1));
12298  #endif
12299  #ifdef TARGET_NR_setfsgid32
12300      case TARGET_NR_setfsgid32:
12301          return get_errno(setfsgid(arg1));
12302  #endif
12303  #ifdef TARGET_NR_mincore
12304      case TARGET_NR_mincore:
12305          {
12306              void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12307              if (!a) {
12308                  return -TARGET_ENOMEM;
12309              }
12310              p = lock_user_string(arg3);
12311              if (!p) {
12312                  ret = -TARGET_EFAULT;
12313              } else {
12314                  ret = get_errno(mincore(a, arg2, p));
12315                  unlock_user(p, arg3, ret);
12316              }
12317              unlock_user(a, arg1, 0);
12318          }
12319          return ret;
12320  #endif
12321  #ifdef TARGET_NR_arm_fadvise64_64
12322      case TARGET_NR_arm_fadvise64_64:
12323          /* arm_fadvise64_64 looks like fadvise64_64 but
12324           * with different argument order: fd, advice, offset, len
12325           * rather than the usual fd, offset, len, advice.
12326           * Note that offset and len are both 64-bit so appear as
12327           * pairs of 32-bit registers.
12328           */
12329          ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12330                              target_offset64(arg5, arg6), arg2);
12331          return -host_to_target_errno(ret);
12332  #endif
12333  
12334  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12335  
12336  #ifdef TARGET_NR_fadvise64_64
12337      case TARGET_NR_fadvise64_64:
12338  #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12339          /* 6 args: fd, advice, offset (high, low), len (high, low) */
12340          ret = arg2;
12341          arg2 = arg3;
12342          arg3 = arg4;
12343          arg4 = arg5;
12344          arg5 = arg6;
12345          arg6 = ret;
12346  #else
12347          /* 6 args: fd, offset (high, low), len (high, low), advice */
12348          if (regpairs_aligned(cpu_env, num)) {
12349              /* offset is in (3,4), len in (5,6) and advice in 7 */
12350              arg2 = arg3;
12351              arg3 = arg4;
12352              arg4 = arg5;
12353              arg5 = arg6;
12354              arg6 = arg7;
12355          }
12356  #endif
12357          ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12358                              target_offset64(arg4, arg5), arg6);
12359          return -host_to_target_errno(ret);
12360  #endif
12361  
12362  #ifdef TARGET_NR_fadvise64
12363      case TARGET_NR_fadvise64:
12364          /* 5 args: fd, offset (high, low), len, advice */
12365          if (regpairs_aligned(cpu_env, num)) {
12366              /* offset is in (3,4), len in 5 and advice in 6 */
12367              arg2 = arg3;
12368              arg3 = arg4;
12369              arg4 = arg5;
12370              arg5 = arg6;
12371          }
12372          ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12373          return -host_to_target_errno(ret);
12374  #endif
12375  
12376  #else /* not a 32-bit ABI */
12377  #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12378  #ifdef TARGET_NR_fadvise64_64
12379      case TARGET_NR_fadvise64_64:
12380  #endif
12381  #ifdef TARGET_NR_fadvise64
12382      case TARGET_NR_fadvise64:
12383  #endif
12384  #ifdef TARGET_S390X
12385          switch (arg4) {
12386          case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12387          case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12388          case 6: arg4 = POSIX_FADV_DONTNEED; break;
12389          case 7: arg4 = POSIX_FADV_NOREUSE; break;
12390          default: break;
12391          }
12392  #endif
12393          return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12394  #endif
12395  #endif /* end of 64-bit ABI fadvise handling */
12396  
12397  #ifdef TARGET_NR_madvise
12398      case TARGET_NR_madvise:
12399          return target_madvise(arg1, arg2, arg3);
12400  #endif
12401  #ifdef TARGET_NR_fcntl64
12402      case TARGET_NR_fcntl64:
12403      {
12404          int cmd;
12405          struct flock64 fl;
12406          from_flock64_fn *copyfrom = copy_from_user_flock64;
12407          to_flock64_fn *copyto = copy_to_user_flock64;
12408  
12409  #ifdef TARGET_ARM
12410          if (!cpu_env->eabi) {
12411              copyfrom = copy_from_user_oabi_flock64;
12412              copyto = copy_to_user_oabi_flock64;
12413          }
12414  #endif
12415  
12416          cmd = target_to_host_fcntl_cmd(arg2);
12417          if (cmd == -TARGET_EINVAL) {
12418              return cmd;
12419          }
12420  
12421          switch(arg2) {
12422          case TARGET_F_GETLK64:
12423              ret = copyfrom(&fl, arg3);
12424              if (ret) {
12425                  break;
12426              }
12427              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12428              if (ret == 0) {
12429                  ret = copyto(arg3, &fl);
12430              }
12431  	    break;
12432  
12433          case TARGET_F_SETLK64:
12434          case TARGET_F_SETLKW64:
12435              ret = copyfrom(&fl, arg3);
12436              if (ret) {
12437                  break;
12438              }
12439              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12440  	    break;
12441          default:
12442              ret = do_fcntl(arg1, arg2, arg3);
12443              break;
12444          }
12445          return ret;
12446      }
12447  #endif
12448  #ifdef TARGET_NR_cacheflush
12449      case TARGET_NR_cacheflush:
12450          /* self-modifying code is handled automatically, so nothing needed */
12451          return 0;
12452  #endif
12453  #ifdef TARGET_NR_getpagesize
12454      case TARGET_NR_getpagesize:
12455          return TARGET_PAGE_SIZE;
12456  #endif
12457      case TARGET_NR_gettid:
12458          return get_errno(sys_gettid());
12459  #ifdef TARGET_NR_readahead
12460      case TARGET_NR_readahead:
12461  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12462          if (regpairs_aligned(cpu_env, num)) {
12463              arg2 = arg3;
12464              arg3 = arg4;
12465              arg4 = arg5;
12466          }
12467          ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12468  #else
12469          ret = get_errno(readahead(arg1, arg2, arg3));
12470  #endif
12471          return ret;
12472  #endif
12473  #ifdef CONFIG_ATTR
12474  #ifdef TARGET_NR_setxattr
12475      case TARGET_NR_listxattr:
12476      case TARGET_NR_llistxattr:
12477      {
12478          void *b = 0;
12479          if (arg2) {
12480              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12481              if (!b) {
12482                  return -TARGET_EFAULT;
12483              }
12484          }
12485          p = lock_user_string(arg1);
12486          if (p) {
12487              if (num == TARGET_NR_listxattr) {
12488                  ret = get_errno(listxattr(p, b, arg3));
12489              } else {
12490                  ret = get_errno(llistxattr(p, b, arg3));
12491              }
12492          } else {
12493              ret = -TARGET_EFAULT;
12494          }
12495          unlock_user(p, arg1, 0);
12496          unlock_user(b, arg2, arg3);
12497          return ret;
12498      }
12499      case TARGET_NR_flistxattr:
12500      {
12501          void *b = 0;
12502          if (arg2) {
12503              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12504              if (!b) {
12505                  return -TARGET_EFAULT;
12506              }
12507          }
12508          ret = get_errno(flistxattr(arg1, b, arg3));
12509          unlock_user(b, arg2, arg3);
12510          return ret;
12511      }
12512      case TARGET_NR_setxattr:
12513      case TARGET_NR_lsetxattr:
12514          {
12515              void *n, *v = 0;
12516              if (arg3) {
12517                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12518                  if (!v) {
12519                      return -TARGET_EFAULT;
12520                  }
12521              }
12522              p = lock_user_string(arg1);
12523              n = lock_user_string(arg2);
12524              if (p && n) {
12525                  if (num == TARGET_NR_setxattr) {
12526                      ret = get_errno(setxattr(p, n, v, arg4, arg5));
12527                  } else {
12528                      ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12529                  }
12530              } else {
12531                  ret = -TARGET_EFAULT;
12532              }
12533              unlock_user(p, arg1, 0);
12534              unlock_user(n, arg2, 0);
12535              unlock_user(v, arg3, 0);
12536          }
12537          return ret;
12538      case TARGET_NR_fsetxattr:
12539          {
12540              void *n, *v = 0;
12541              if (arg3) {
12542                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12543                  if (!v) {
12544                      return -TARGET_EFAULT;
12545                  }
12546              }
12547              n = lock_user_string(arg2);
12548              if (n) {
12549                  ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12550              } else {
12551                  ret = -TARGET_EFAULT;
12552              }
12553              unlock_user(n, arg2, 0);
12554              unlock_user(v, arg3, 0);
12555          }
12556          return ret;
12557      case TARGET_NR_getxattr:
12558      case TARGET_NR_lgetxattr:
12559          {
12560              void *n, *v = 0;
12561              if (arg3) {
12562                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12563                  if (!v) {
12564                      return -TARGET_EFAULT;
12565                  }
12566              }
12567              p = lock_user_string(arg1);
12568              n = lock_user_string(arg2);
12569              if (p && n) {
12570                  if (num == TARGET_NR_getxattr) {
12571                      ret = get_errno(getxattr(p, n, v, arg4));
12572                  } else {
12573                      ret = get_errno(lgetxattr(p, n, v, arg4));
12574                  }
12575              } else {
12576                  ret = -TARGET_EFAULT;
12577              }
12578              unlock_user(p, arg1, 0);
12579              unlock_user(n, arg2, 0);
12580              unlock_user(v, arg3, arg4);
12581          }
12582          return ret;
12583      case TARGET_NR_fgetxattr:
12584          {
12585              void *n, *v = 0;
12586              if (arg3) {
12587                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12588                  if (!v) {
12589                      return -TARGET_EFAULT;
12590                  }
12591              }
12592              n = lock_user_string(arg2);
12593              if (n) {
12594                  ret = get_errno(fgetxattr(arg1, n, v, arg4));
12595              } else {
12596                  ret = -TARGET_EFAULT;
12597              }
12598              unlock_user(n, arg2, 0);
12599              unlock_user(v, arg3, arg4);
12600          }
12601          return ret;
12602      case TARGET_NR_removexattr:
12603      case TARGET_NR_lremovexattr:
12604          {
12605              void *n;
12606              p = lock_user_string(arg1);
12607              n = lock_user_string(arg2);
12608              if (p && n) {
12609                  if (num == TARGET_NR_removexattr) {
12610                      ret = get_errno(removexattr(p, n));
12611                  } else {
12612                      ret = get_errno(lremovexattr(p, n));
12613                  }
12614              } else {
12615                  ret = -TARGET_EFAULT;
12616              }
12617              unlock_user(p, arg1, 0);
12618              unlock_user(n, arg2, 0);
12619          }
12620          return ret;
12621      case TARGET_NR_fremovexattr:
12622          {
12623              void *n;
12624              n = lock_user_string(arg2);
12625              if (n) {
12626                  ret = get_errno(fremovexattr(arg1, n));
12627              } else {
12628                  ret = -TARGET_EFAULT;
12629              }
12630              unlock_user(n, arg2, 0);
12631          }
12632          return ret;
12633  #endif
12634  #endif /* CONFIG_ATTR */
12635  #ifdef TARGET_NR_set_thread_area
12636      case TARGET_NR_set_thread_area:
12637  #if defined(TARGET_MIPS)
12638        cpu_env->active_tc.CP0_UserLocal = arg1;
12639        return 0;
12640  #elif defined(TARGET_CRIS)
12641        if (arg1 & 0xff)
12642            ret = -TARGET_EINVAL;
12643        else {
12644            cpu_env->pregs[PR_PID] = arg1;
12645            ret = 0;
12646        }
12647        return ret;
12648  #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12649        return do_set_thread_area(cpu_env, arg1);
12650  #elif defined(TARGET_M68K)
12651        {
12652            TaskState *ts = get_task_state(cpu);
12653            ts->tp_value = arg1;
12654            return 0;
12655        }
12656  #else
12657        return -TARGET_ENOSYS;
12658  #endif
12659  #endif
12660  #ifdef TARGET_NR_get_thread_area
12661      case TARGET_NR_get_thread_area:
12662  #if defined(TARGET_I386) && defined(TARGET_ABI32)
12663          return do_get_thread_area(cpu_env, arg1);
12664  #elif defined(TARGET_M68K)
12665          {
12666              TaskState *ts = get_task_state(cpu);
12667              return ts->tp_value;
12668          }
12669  #else
12670          return -TARGET_ENOSYS;
12671  #endif
12672  #endif
12673  #ifdef TARGET_NR_getdomainname
12674      case TARGET_NR_getdomainname:
12675          return -TARGET_ENOSYS;
12676  #endif
12677  
12678  #ifdef TARGET_NR_clock_settime
12679      case TARGET_NR_clock_settime:
12680      {
12681          struct timespec ts;
12682  
12683          ret = target_to_host_timespec(&ts, arg2);
12684          if (!is_error(ret)) {
12685              ret = get_errno(clock_settime(arg1, &ts));
12686          }
12687          return ret;
12688      }
12689  #endif
12690  #ifdef TARGET_NR_clock_settime64
12691      case TARGET_NR_clock_settime64:
12692      {
12693          struct timespec ts;
12694  
12695          ret = target_to_host_timespec64(&ts, arg2);
12696          if (!is_error(ret)) {
12697              ret = get_errno(clock_settime(arg1, &ts));
12698          }
12699          return ret;
12700      }
12701  #endif
12702  #ifdef TARGET_NR_clock_gettime
12703      case TARGET_NR_clock_gettime:
12704      {
12705          struct timespec ts;
12706          ret = get_errno(clock_gettime(arg1, &ts));
12707          if (!is_error(ret)) {
12708              ret = host_to_target_timespec(arg2, &ts);
12709          }
12710          return ret;
12711      }
12712  #endif
12713  #ifdef TARGET_NR_clock_gettime64
12714      case TARGET_NR_clock_gettime64:
12715      {
12716          struct timespec ts;
12717          ret = get_errno(clock_gettime(arg1, &ts));
12718          if (!is_error(ret)) {
12719              ret = host_to_target_timespec64(arg2, &ts);
12720          }
12721          return ret;
12722      }
12723  #endif
12724  #ifdef TARGET_NR_clock_getres
12725      case TARGET_NR_clock_getres:
12726      {
12727          struct timespec ts;
12728          ret = get_errno(clock_getres(arg1, &ts));
12729          if (!is_error(ret)) {
12730              host_to_target_timespec(arg2, &ts);
12731          }
12732          return ret;
12733      }
12734  #endif
12735  #ifdef TARGET_NR_clock_getres_time64
12736      case TARGET_NR_clock_getres_time64:
12737      {
12738          struct timespec ts;
12739          ret = get_errno(clock_getres(arg1, &ts));
12740          if (!is_error(ret)) {
12741              host_to_target_timespec64(arg2, &ts);
12742          }
12743          return ret;
12744      }
12745  #endif
12746  #ifdef TARGET_NR_clock_nanosleep
12747      case TARGET_NR_clock_nanosleep:
12748      {
12749          struct timespec ts;
12750          if (target_to_host_timespec(&ts, arg3)) {
12751              return -TARGET_EFAULT;
12752          }
12753          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12754                                               &ts, arg4 ? &ts : NULL));
12755          /*
12756           * if the call is interrupted by a signal handler, it fails
12757           * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12758           * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12759           */
12760          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12761              host_to_target_timespec(arg4, &ts)) {
12762                return -TARGET_EFAULT;
12763          }
12764  
12765          return ret;
12766      }
12767  #endif
12768  #ifdef TARGET_NR_clock_nanosleep_time64
12769      case TARGET_NR_clock_nanosleep_time64:
12770      {
12771          struct timespec ts;
12772  
12773          if (target_to_host_timespec64(&ts, arg3)) {
12774              return -TARGET_EFAULT;
12775          }
12776  
12777          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12778                                               &ts, arg4 ? &ts : NULL));
12779  
12780          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12781              host_to_target_timespec64(arg4, &ts)) {
12782              return -TARGET_EFAULT;
12783          }
12784          return ret;
12785      }
12786  #endif
12787  
12788  #if defined(TARGET_NR_set_tid_address)
12789      case TARGET_NR_set_tid_address:
12790      {
12791          TaskState *ts = get_task_state(cpu);
12792          ts->child_tidptr = arg1;
12793          /* do not call host set_tid_address() syscall, instead return tid() */
12794          return get_errno(sys_gettid());
12795      }
12796  #endif
12797  
12798      case TARGET_NR_tkill:
12799          return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12800  
12801      case TARGET_NR_tgkill:
12802          return get_errno(safe_tgkill((int)arg1, (int)arg2,
12803                           target_to_host_signal(arg3)));
12804  
12805  #ifdef TARGET_NR_set_robust_list
12806      case TARGET_NR_set_robust_list:
12807      case TARGET_NR_get_robust_list:
12808          /* The ABI for supporting robust futexes has userspace pass
12809           * the kernel a pointer to a linked list which is updated by
12810           * userspace after the syscall; the list is walked by the kernel
12811           * when the thread exits. Since the linked list in QEMU guest
12812           * memory isn't a valid linked list for the host and we have
12813           * no way to reliably intercept the thread-death event, we can't
12814           * support these. Silently return ENOSYS so that guest userspace
12815           * falls back to a non-robust futex implementation (which should
12816           * be OK except in the corner case of the guest crashing while
12817           * holding a mutex that is shared with another process via
12818           * shared memory).
12819           */
12820          return -TARGET_ENOSYS;
12821  #endif
12822  
12823  #if defined(TARGET_NR_utimensat)
12824      case TARGET_NR_utimensat:
12825          {
12826              struct timespec *tsp, ts[2];
12827              if (!arg3) {
12828                  tsp = NULL;
12829              } else {
12830                  if (target_to_host_timespec(ts, arg3)) {
12831                      return -TARGET_EFAULT;
12832                  }
12833                  if (target_to_host_timespec(ts + 1, arg3 +
12834                                              sizeof(struct target_timespec))) {
12835                      return -TARGET_EFAULT;
12836                  }
12837                  tsp = ts;
12838              }
12839              if (!arg2)
12840                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12841              else {
12842                  if (!(p = lock_user_string(arg2))) {
12843                      return -TARGET_EFAULT;
12844                  }
12845                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12846                  unlock_user(p, arg2, 0);
12847              }
12848          }
12849          return ret;
12850  #endif
12851  #ifdef TARGET_NR_utimensat_time64
12852      case TARGET_NR_utimensat_time64:
12853          {
12854              struct timespec *tsp, ts[2];
12855              if (!arg3) {
12856                  tsp = NULL;
12857              } else {
12858                  if (target_to_host_timespec64(ts, arg3)) {
12859                      return -TARGET_EFAULT;
12860                  }
12861                  if (target_to_host_timespec64(ts + 1, arg3 +
12862                                       sizeof(struct target__kernel_timespec))) {
12863                      return -TARGET_EFAULT;
12864                  }
12865                  tsp = ts;
12866              }
12867              if (!arg2)
12868                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12869              else {
12870                  p = lock_user_string(arg2);
12871                  if (!p) {
12872                      return -TARGET_EFAULT;
12873                  }
12874                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12875                  unlock_user(p, arg2, 0);
12876              }
12877          }
12878          return ret;
12879  #endif
12880  #ifdef TARGET_NR_futex
12881      case TARGET_NR_futex:
12882          return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12883  #endif
12884  #ifdef TARGET_NR_futex_time64
12885      case TARGET_NR_futex_time64:
12886          return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12887  #endif
12888  #ifdef CONFIG_INOTIFY
12889  #if defined(TARGET_NR_inotify_init)
12890      case TARGET_NR_inotify_init:
12891          ret = get_errno(inotify_init());
12892          if (ret >= 0) {
12893              fd_trans_register(ret, &target_inotify_trans);
12894          }
12895          return ret;
12896  #endif
12897  #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12898      case TARGET_NR_inotify_init1:
12899          ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12900                                            fcntl_flags_tbl)));
12901          if (ret >= 0) {
12902              fd_trans_register(ret, &target_inotify_trans);
12903          }
12904          return ret;
12905  #endif
12906  #if defined(TARGET_NR_inotify_add_watch)
12907      case TARGET_NR_inotify_add_watch:
12908          p = lock_user_string(arg2);
12909          ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12910          unlock_user(p, arg2, 0);
12911          return ret;
12912  #endif
12913  #if defined(TARGET_NR_inotify_rm_watch)
12914      case TARGET_NR_inotify_rm_watch:
12915          return get_errno(inotify_rm_watch(arg1, arg2));
12916  #endif
12917  #endif
12918  
12919  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12920      case TARGET_NR_mq_open:
12921          {
12922              struct mq_attr posix_mq_attr;
12923              struct mq_attr *pposix_mq_attr;
12924              int host_flags;
12925  
12926              host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12927              pposix_mq_attr = NULL;
12928              if (arg4) {
12929                  if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12930                      return -TARGET_EFAULT;
12931                  }
12932                  pposix_mq_attr = &posix_mq_attr;
12933              }
12934              p = lock_user_string(arg1 - 1);
12935              if (!p) {
12936                  return -TARGET_EFAULT;
12937              }
12938              ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12939              unlock_user (p, arg1, 0);
12940          }
12941          return ret;
12942  
12943      case TARGET_NR_mq_unlink:
12944          p = lock_user_string(arg1 - 1);
12945          if (!p) {
12946              return -TARGET_EFAULT;
12947          }
12948          ret = get_errno(mq_unlink(p));
12949          unlock_user (p, arg1, 0);
12950          return ret;
12951  
12952  #ifdef TARGET_NR_mq_timedsend
12953      case TARGET_NR_mq_timedsend:
12954          {
12955              struct timespec ts;
12956  
12957              p = lock_user (VERIFY_READ, arg2, arg3, 1);
12958              if (arg5 != 0) {
12959                  if (target_to_host_timespec(&ts, arg5)) {
12960                      return -TARGET_EFAULT;
12961                  }
12962                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12963                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12964                      return -TARGET_EFAULT;
12965                  }
12966              } else {
12967                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12968              }
12969              unlock_user (p, arg2, arg3);
12970          }
12971          return ret;
12972  #endif
12973  #ifdef TARGET_NR_mq_timedsend_time64
12974      case TARGET_NR_mq_timedsend_time64:
12975          {
12976              struct timespec ts;
12977  
12978              p = lock_user(VERIFY_READ, arg2, arg3, 1);
12979              if (arg5 != 0) {
12980                  if (target_to_host_timespec64(&ts, arg5)) {
12981                      return -TARGET_EFAULT;
12982                  }
12983                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12984                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12985                      return -TARGET_EFAULT;
12986                  }
12987              } else {
12988                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12989              }
12990              unlock_user(p, arg2, arg3);
12991          }
12992          return ret;
12993  #endif
12994  
12995  #ifdef TARGET_NR_mq_timedreceive
12996      case TARGET_NR_mq_timedreceive:
12997          {
12998              struct timespec ts;
12999              unsigned int prio;
13000  
13001              p = lock_user (VERIFY_READ, arg2, arg3, 1);
13002              if (arg5 != 0) {
13003                  if (target_to_host_timespec(&ts, arg5)) {
13004                      return -TARGET_EFAULT;
13005                  }
13006                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13007                                                       &prio, &ts));
13008                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13009                      return -TARGET_EFAULT;
13010                  }
13011              } else {
13012                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13013                                                       &prio, NULL));
13014              }
13015              unlock_user (p, arg2, arg3);
13016              if (arg4 != 0)
13017                  put_user_u32(prio, arg4);
13018          }
13019          return ret;
13020  #endif
13021  #ifdef TARGET_NR_mq_timedreceive_time64
13022      case TARGET_NR_mq_timedreceive_time64:
13023          {
13024              struct timespec ts;
13025              unsigned int prio;
13026  
13027              p = lock_user(VERIFY_READ, arg2, arg3, 1);
13028              if (arg5 != 0) {
13029                  if (target_to_host_timespec64(&ts, arg5)) {
13030                      return -TARGET_EFAULT;
13031                  }
13032                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13033                                                       &prio, &ts));
13034                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13035                      return -TARGET_EFAULT;
13036                  }
13037              } else {
13038                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13039                                                       &prio, NULL));
13040              }
13041              unlock_user(p, arg2, arg3);
13042              if (arg4 != 0) {
13043                  put_user_u32(prio, arg4);
13044              }
13045          }
13046          return ret;
13047  #endif
13048  
13049      /* Not implemented for now... */
13050  /*     case TARGET_NR_mq_notify: */
13051  /*         break; */
13052  
13053      case TARGET_NR_mq_getsetattr:
13054          {
13055              struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13056              ret = 0;
13057              if (arg2 != 0) {
13058                  copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13059                  ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13060                                             &posix_mq_attr_out));
13061              } else if (arg3 != 0) {
13062                  ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13063              }
13064              if (ret == 0 && arg3 != 0) {
13065                  copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13066              }
13067          }
13068          return ret;
13069  #endif
13070  
13071  #ifdef CONFIG_SPLICE
13072  #ifdef TARGET_NR_tee
13073      case TARGET_NR_tee:
13074          {
13075              ret = get_errno(tee(arg1,arg2,arg3,arg4));
13076          }
13077          return ret;
13078  #endif
13079  #ifdef TARGET_NR_splice
13080      case TARGET_NR_splice:
13081          {
13082              loff_t loff_in, loff_out;
13083              loff_t *ploff_in = NULL, *ploff_out = NULL;
13084              if (arg2) {
13085                  if (get_user_u64(loff_in, arg2)) {
13086                      return -TARGET_EFAULT;
13087                  }
13088                  ploff_in = &loff_in;
13089              }
13090              if (arg4) {
13091                  if (get_user_u64(loff_out, arg4)) {
13092                      return -TARGET_EFAULT;
13093                  }
13094                  ploff_out = &loff_out;
13095              }
13096              ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13097              if (arg2) {
13098                  if (put_user_u64(loff_in, arg2)) {
13099                      return -TARGET_EFAULT;
13100                  }
13101              }
13102              if (arg4) {
13103                  if (put_user_u64(loff_out, arg4)) {
13104                      return -TARGET_EFAULT;
13105                  }
13106              }
13107          }
13108          return ret;
13109  #endif
13110  #ifdef TARGET_NR_vmsplice
13111  	case TARGET_NR_vmsplice:
13112          {
13113              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13114              if (vec != NULL) {
13115                  ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13116                  unlock_iovec(vec, arg2, arg3, 0);
13117              } else {
13118                  ret = -host_to_target_errno(errno);
13119              }
13120          }
13121          return ret;
13122  #endif
13123  #endif /* CONFIG_SPLICE */
13124  #ifdef CONFIG_EVENTFD
13125  #if defined(TARGET_NR_eventfd)
13126      case TARGET_NR_eventfd:
13127          ret = get_errno(eventfd(arg1, 0));
13128          if (ret >= 0) {
13129              fd_trans_register(ret, &target_eventfd_trans);
13130          }
13131          return ret;
13132  #endif
13133  #if defined(TARGET_NR_eventfd2)
13134      case TARGET_NR_eventfd2:
13135      {
13136          int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13137          if (arg2 & TARGET_O_NONBLOCK) {
13138              host_flags |= O_NONBLOCK;
13139          }
13140          if (arg2 & TARGET_O_CLOEXEC) {
13141              host_flags |= O_CLOEXEC;
13142          }
13143          ret = get_errno(eventfd(arg1, host_flags));
13144          if (ret >= 0) {
13145              fd_trans_register(ret, &target_eventfd_trans);
13146          }
13147          return ret;
13148      }
13149  #endif
13150  #endif /* CONFIG_EVENTFD  */
13151  #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13152      case TARGET_NR_fallocate:
13153  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13154          ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13155                                    target_offset64(arg5, arg6)));
13156  #else
13157          ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13158  #endif
13159          return ret;
13160  #endif
13161  #if defined(CONFIG_SYNC_FILE_RANGE)
13162  #if defined(TARGET_NR_sync_file_range)
13163      case TARGET_NR_sync_file_range:
13164  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13165  #if defined(TARGET_MIPS)
13166          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13167                                          target_offset64(arg5, arg6), arg7));
13168  #else
13169          ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13170                                          target_offset64(arg4, arg5), arg6));
13171  #endif /* !TARGET_MIPS */
13172  #else
13173          ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13174  #endif
13175          return ret;
13176  #endif
13177  #if defined(TARGET_NR_sync_file_range2) || \
13178      defined(TARGET_NR_arm_sync_file_range)
13179  #if defined(TARGET_NR_sync_file_range2)
13180      case TARGET_NR_sync_file_range2:
13181  #endif
13182  #if defined(TARGET_NR_arm_sync_file_range)
13183      case TARGET_NR_arm_sync_file_range:
13184  #endif
13185          /* This is like sync_file_range but the arguments are reordered */
13186  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13187          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13188                                          target_offset64(arg5, arg6), arg2));
13189  #else
13190          ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13191  #endif
13192          return ret;
13193  #endif
13194  #endif
13195  #if defined(TARGET_NR_signalfd4)
13196      case TARGET_NR_signalfd4:
13197          return do_signalfd4(arg1, arg2, arg4);
13198  #endif
13199  #if defined(TARGET_NR_signalfd)
13200      case TARGET_NR_signalfd:
13201          return do_signalfd4(arg1, arg2, 0);
13202  #endif
13203  #if defined(CONFIG_EPOLL)
13204  #if defined(TARGET_NR_epoll_create)
13205      case TARGET_NR_epoll_create:
13206          return get_errno(epoll_create(arg1));
13207  #endif
13208  #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13209      case TARGET_NR_epoll_create1:
13210          return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13211  #endif
13212  #if defined(TARGET_NR_epoll_ctl)
13213      case TARGET_NR_epoll_ctl:
13214      {
13215          struct epoll_event ep;
13216          struct epoll_event *epp = 0;
13217          if (arg4) {
13218              if (arg2 != EPOLL_CTL_DEL) {
13219                  struct target_epoll_event *target_ep;
13220                  if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13221                      return -TARGET_EFAULT;
13222                  }
13223                  ep.events = tswap32(target_ep->events);
13224                  /*
13225                   * The epoll_data_t union is just opaque data to the kernel,
13226                   * so we transfer all 64 bits across and need not worry what
13227                   * actual data type it is.
13228                   */
13229                  ep.data.u64 = tswap64(target_ep->data.u64);
13230                  unlock_user_struct(target_ep, arg4, 0);
13231              }
13232              /*
13233               * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13234               * non-null pointer, even though this argument is ignored.
13235               *
13236               */
13237              epp = &ep;
13238          }
13239          return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13240      }
13241  #endif
13242  
13243  #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13244  #if defined(TARGET_NR_epoll_wait)
13245      case TARGET_NR_epoll_wait:
13246  #endif
13247  #if defined(TARGET_NR_epoll_pwait)
13248      case TARGET_NR_epoll_pwait:
13249  #endif
13250      {
13251          struct target_epoll_event *target_ep;
13252          struct epoll_event *ep;
13253          int epfd = arg1;
13254          int maxevents = arg3;
13255          int timeout = arg4;
13256  
13257          if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13258              return -TARGET_EINVAL;
13259          }
13260  
13261          target_ep = lock_user(VERIFY_WRITE, arg2,
13262                                maxevents * sizeof(struct target_epoll_event), 1);
13263          if (!target_ep) {
13264              return -TARGET_EFAULT;
13265          }
13266  
13267          ep = g_try_new(struct epoll_event, maxevents);
13268          if (!ep) {
13269              unlock_user(target_ep, arg2, 0);
13270              return -TARGET_ENOMEM;
13271          }
13272  
13273          switch (num) {
13274  #if defined(TARGET_NR_epoll_pwait)
13275          case TARGET_NR_epoll_pwait:
13276          {
13277              sigset_t *set = NULL;
13278  
13279              if (arg5) {
13280                  ret = process_sigsuspend_mask(&set, arg5, arg6);
13281                  if (ret != 0) {
13282                      break;
13283                  }
13284              }
13285  
13286              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13287                                               set, SIGSET_T_SIZE));
13288  
13289              if (set) {
13290                  finish_sigsuspend_mask(ret);
13291              }
13292              break;
13293          }
13294  #endif
13295  #if defined(TARGET_NR_epoll_wait)
13296          case TARGET_NR_epoll_wait:
13297              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13298                                               NULL, 0));
13299              break;
13300  #endif
13301          default:
13302              ret = -TARGET_ENOSYS;
13303          }
13304          if (!is_error(ret)) {
13305              int i;
13306              for (i = 0; i < ret; i++) {
13307                  target_ep[i].events = tswap32(ep[i].events);
13308                  target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13309              }
13310              unlock_user(target_ep, arg2,
13311                          ret * sizeof(struct target_epoll_event));
13312          } else {
13313              unlock_user(target_ep, arg2, 0);
13314          }
13315          g_free(ep);
13316          return ret;
13317      }
13318  #endif
13319  #endif
13320  #ifdef TARGET_NR_prlimit64
13321      case TARGET_NR_prlimit64:
13322      {
13323          /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13324          struct target_rlimit64 *target_rnew, *target_rold;
13325          struct host_rlimit64 rnew, rold, *rnewp = 0;
13326          int resource = target_to_host_resource(arg2);
13327  
13328          if (arg3 && (resource != RLIMIT_AS &&
13329                       resource != RLIMIT_DATA &&
13330                       resource != RLIMIT_STACK)) {
13331              if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13332                  return -TARGET_EFAULT;
13333              }
13334              __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13335              __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13336              unlock_user_struct(target_rnew, arg3, 0);
13337              rnewp = &rnew;
13338          }
13339  
13340          ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13341          if (!is_error(ret) && arg4) {
13342              if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13343                  return -TARGET_EFAULT;
13344              }
13345              __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13346              __put_user(rold.rlim_max, &target_rold->rlim_max);
13347              unlock_user_struct(target_rold, arg4, 1);
13348          }
13349          return ret;
13350      }
13351  #endif
13352  #ifdef TARGET_NR_gethostname
13353      case TARGET_NR_gethostname:
13354      {
13355          char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13356          if (name) {
13357              ret = get_errno(gethostname(name, arg2));
13358              unlock_user(name, arg1, arg2);
13359          } else {
13360              ret = -TARGET_EFAULT;
13361          }
13362          return ret;
13363      }
13364  #endif
13365  #ifdef TARGET_NR_atomic_cmpxchg_32
13366      case TARGET_NR_atomic_cmpxchg_32:
13367      {
13368          /* should use start_exclusive from main.c */
13369          abi_ulong mem_value;
13370          if (get_user_u32(mem_value, arg6)) {
13371              target_siginfo_t info;
13372              info.si_signo = SIGSEGV;
13373              info.si_errno = 0;
13374              info.si_code = TARGET_SEGV_MAPERR;
13375              info._sifields._sigfault._addr = arg6;
13376              queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13377              ret = 0xdeadbeef;
13378  
13379          }
13380          if (mem_value == arg2)
13381              put_user_u32(arg1, arg6);
13382          return mem_value;
13383      }
13384  #endif
13385  #ifdef TARGET_NR_atomic_barrier
13386      case TARGET_NR_atomic_barrier:
13387          /* Like the kernel implementation and the
13388             qemu arm barrier, no-op this? */
13389          return 0;
13390  #endif
13391  
13392  #ifdef TARGET_NR_timer_create
13393      case TARGET_NR_timer_create:
13394      {
13395          /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13396  
13397          struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13398  
13399          int clkid = arg1;
13400          int timer_index = next_free_host_timer();
13401  
13402          if (timer_index < 0) {
13403              ret = -TARGET_EAGAIN;
13404          } else {
13405              timer_t *phtimer = g_posix_timers  + timer_index;
13406  
13407              if (arg2) {
13408                  phost_sevp = &host_sevp;
13409                  ret = target_to_host_sigevent(phost_sevp, arg2);
13410                  if (ret != 0) {
13411                      free_host_timer_slot(timer_index);
13412                      return ret;
13413                  }
13414              }
13415  
13416              ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13417              if (ret) {
13418                  free_host_timer_slot(timer_index);
13419              } else {
13420                  if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13421                      timer_delete(*phtimer);
13422                      free_host_timer_slot(timer_index);
13423                      return -TARGET_EFAULT;
13424                  }
13425              }
13426          }
13427          return ret;
13428      }
13429  #endif
13430  
13431  #ifdef TARGET_NR_timer_settime
13432      case TARGET_NR_timer_settime:
13433      {
13434          /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13435           * struct itimerspec * old_value */
13436          target_timer_t timerid = get_timer_id(arg1);
13437  
13438          if (timerid < 0) {
13439              ret = timerid;
13440          } else if (arg3 == 0) {
13441              ret = -TARGET_EINVAL;
13442          } else {
13443              timer_t htimer = g_posix_timers[timerid];
13444              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13445  
13446              if (target_to_host_itimerspec(&hspec_new, arg3)) {
13447                  return -TARGET_EFAULT;
13448              }
13449              ret = get_errno(
13450                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13451              if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13452                  return -TARGET_EFAULT;
13453              }
13454          }
13455          return ret;
13456      }
13457  #endif
13458  
13459  #ifdef TARGET_NR_timer_settime64
13460      case TARGET_NR_timer_settime64:
13461      {
13462          target_timer_t timerid = get_timer_id(arg1);
13463  
13464          if (timerid < 0) {
13465              ret = timerid;
13466          } else if (arg3 == 0) {
13467              ret = -TARGET_EINVAL;
13468          } else {
13469              timer_t htimer = g_posix_timers[timerid];
13470              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13471  
13472              if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13473                  return -TARGET_EFAULT;
13474              }
13475              ret = get_errno(
13476                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13477              if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13478                  return -TARGET_EFAULT;
13479              }
13480          }
13481          return ret;
13482      }
13483  #endif
13484  
13485  #ifdef TARGET_NR_timer_gettime
13486      case TARGET_NR_timer_gettime:
13487      {
13488          /* args: timer_t timerid, struct itimerspec *curr_value */
13489          target_timer_t timerid = get_timer_id(arg1);
13490  
13491          if (timerid < 0) {
13492              ret = timerid;
13493          } else if (!arg2) {
13494              ret = -TARGET_EFAULT;
13495          } else {
13496              timer_t htimer = g_posix_timers[timerid];
13497              struct itimerspec hspec;
13498              ret = get_errno(timer_gettime(htimer, &hspec));
13499  
13500              if (host_to_target_itimerspec(arg2, &hspec)) {
13501                  ret = -TARGET_EFAULT;
13502              }
13503          }
13504          return ret;
13505      }
13506  #endif
13507  
13508  #ifdef TARGET_NR_timer_gettime64
13509      case TARGET_NR_timer_gettime64:
13510      {
13511          /* args: timer_t timerid, struct itimerspec64 *curr_value */
13512          target_timer_t timerid = get_timer_id(arg1);
13513  
13514          if (timerid < 0) {
13515              ret = timerid;
13516          } else if (!arg2) {
13517              ret = -TARGET_EFAULT;
13518          } else {
13519              timer_t htimer = g_posix_timers[timerid];
13520              struct itimerspec hspec;
13521              ret = get_errno(timer_gettime(htimer, &hspec));
13522  
13523              if (host_to_target_itimerspec64(arg2, &hspec)) {
13524                  ret = -TARGET_EFAULT;
13525              }
13526          }
13527          return ret;
13528      }
13529  #endif
13530  
13531  #ifdef TARGET_NR_timer_getoverrun
13532      case TARGET_NR_timer_getoverrun:
13533      {
13534          /* args: timer_t timerid */
13535          target_timer_t timerid = get_timer_id(arg1);
13536  
13537          if (timerid < 0) {
13538              ret = timerid;
13539          } else {
13540              timer_t htimer = g_posix_timers[timerid];
13541              ret = get_errno(timer_getoverrun(htimer));
13542          }
13543          return ret;
13544      }
13545  #endif
13546  
13547  #ifdef TARGET_NR_timer_delete
13548      case TARGET_NR_timer_delete:
13549      {
13550          /* args: timer_t timerid */
13551          target_timer_t timerid = get_timer_id(arg1);
13552  
13553          if (timerid < 0) {
13554              ret = timerid;
13555          } else {
13556              timer_t htimer = g_posix_timers[timerid];
13557              ret = get_errno(timer_delete(htimer));
13558              free_host_timer_slot(timerid);
13559          }
13560          return ret;
13561      }
13562  #endif
13563  
13564  #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13565      case TARGET_NR_timerfd_create:
13566          ret = get_errno(timerfd_create(arg1,
13567                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13568          if (ret >= 0) {
13569              fd_trans_register(ret, &target_timerfd_trans);
13570          }
13571          return ret;
13572  #endif
13573  
13574  #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13575      case TARGET_NR_timerfd_gettime:
13576          {
13577              struct itimerspec its_curr;
13578  
13579              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13580  
13581              if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13582                  return -TARGET_EFAULT;
13583              }
13584          }
13585          return ret;
13586  #endif
13587  
13588  #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13589      case TARGET_NR_timerfd_gettime64:
13590          {
13591              struct itimerspec its_curr;
13592  
13593              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13594  
13595              if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13596                  return -TARGET_EFAULT;
13597              }
13598          }
13599          return ret;
13600  #endif
13601  
13602  #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13603      case TARGET_NR_timerfd_settime:
13604          {
13605              struct itimerspec its_new, its_old, *p_new;
13606  
13607              if (arg3) {
13608                  if (target_to_host_itimerspec(&its_new, arg3)) {
13609                      return -TARGET_EFAULT;
13610                  }
13611                  p_new = &its_new;
13612              } else {
13613                  p_new = NULL;
13614              }
13615  
13616              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13617  
13618              if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13619                  return -TARGET_EFAULT;
13620              }
13621          }
13622          return ret;
13623  #endif
13624  
13625  #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13626      case TARGET_NR_timerfd_settime64:
13627          {
13628              struct itimerspec its_new, its_old, *p_new;
13629  
13630              if (arg3) {
13631                  if (target_to_host_itimerspec64(&its_new, arg3)) {
13632                      return -TARGET_EFAULT;
13633                  }
13634                  p_new = &its_new;
13635              } else {
13636                  p_new = NULL;
13637              }
13638  
13639              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13640  
13641              if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13642                  return -TARGET_EFAULT;
13643              }
13644          }
13645          return ret;
13646  #endif
13647  
13648  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13649      case TARGET_NR_ioprio_get:
13650          return get_errno(ioprio_get(arg1, arg2));
13651  #endif
13652  
13653  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13654      case TARGET_NR_ioprio_set:
13655          return get_errno(ioprio_set(arg1, arg2, arg3));
13656  #endif
13657  
13658  #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13659      case TARGET_NR_setns:
13660          return get_errno(setns(arg1, arg2));
13661  #endif
13662  #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13663      case TARGET_NR_unshare:
13664          return get_errno(unshare(arg1));
13665  #endif
13666  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13667      case TARGET_NR_kcmp:
13668          return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13669  #endif
13670  #ifdef TARGET_NR_swapcontext
13671      case TARGET_NR_swapcontext:
13672          /* PowerPC specific.  */
13673          return do_swapcontext(cpu_env, arg1, arg2, arg3);
13674  #endif
13675  #ifdef TARGET_NR_memfd_create
13676      case TARGET_NR_memfd_create:
13677          p = lock_user_string(arg1);
13678          if (!p) {
13679              return -TARGET_EFAULT;
13680          }
13681          ret = get_errno(memfd_create(p, arg2));
13682          fd_trans_unregister(ret);
13683          unlock_user(p, arg1, 0);
13684          return ret;
13685  #endif
13686  #if defined TARGET_NR_membarrier && defined __NR_membarrier
13687      case TARGET_NR_membarrier:
13688          return get_errno(membarrier(arg1, arg2));
13689  #endif
13690  
13691  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13692      case TARGET_NR_copy_file_range:
13693          {
13694              loff_t inoff, outoff;
13695              loff_t *pinoff = NULL, *poutoff = NULL;
13696  
13697              if (arg2) {
13698                  if (get_user_u64(inoff, arg2)) {
13699                      return -TARGET_EFAULT;
13700                  }
13701                  pinoff = &inoff;
13702              }
13703              if (arg4) {
13704                  if (get_user_u64(outoff, arg4)) {
13705                      return -TARGET_EFAULT;
13706                  }
13707                  poutoff = &outoff;
13708              }
13709              /* Do not sign-extend the count parameter. */
13710              ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13711                                                   (abi_ulong)arg5, arg6));
13712              if (!is_error(ret) && ret > 0) {
13713                  if (arg2) {
13714                      if (put_user_u64(inoff, arg2)) {
13715                          return -TARGET_EFAULT;
13716                      }
13717                  }
13718                  if (arg4) {
13719                      if (put_user_u64(outoff, arg4)) {
13720                          return -TARGET_EFAULT;
13721                      }
13722                  }
13723              }
13724          }
13725          return ret;
13726  #endif
13727  
13728  #if defined(TARGET_NR_pivot_root)
13729      case TARGET_NR_pivot_root:
13730          {
13731              void *p2;
13732              p = lock_user_string(arg1); /* new_root */
13733              p2 = lock_user_string(arg2); /* put_old */
13734              if (!p || !p2) {
13735                  ret = -TARGET_EFAULT;
13736              } else {
13737                  ret = get_errno(pivot_root(p, p2));
13738              }
13739              unlock_user(p2, arg2, 0);
13740              unlock_user(p, arg1, 0);
13741          }
13742          return ret;
13743  #endif
13744  
13745  #if defined(TARGET_NR_riscv_hwprobe)
13746      case TARGET_NR_riscv_hwprobe:
13747          return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13748  #endif
13749  
13750      default:
13751          qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13752          return -TARGET_ENOSYS;
13753      }
13754      return ret;
13755  }
13756  
13757  abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13758                      abi_long arg2, abi_long arg3, abi_long arg4,
13759                      abi_long arg5, abi_long arg6, abi_long arg7,
13760                      abi_long arg8)
13761  {
13762      CPUState *cpu = env_cpu(cpu_env);
13763      abi_long ret;
13764  
13765  #ifdef DEBUG_ERESTARTSYS
13766      /* Debug-only code for exercising the syscall-restart code paths
13767       * in the per-architecture cpu main loops: restart every syscall
13768       * the guest makes once before letting it through.
13769       */
13770      {
13771          static bool flag;
13772          flag = !flag;
13773          if (flag) {
13774              return -QEMU_ERESTARTSYS;
13775          }
13776      }
13777  #endif
13778  
13779      record_syscall_start(cpu, num, arg1,
13780                           arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13781  
13782      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13783          print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13784      }
13785  
13786      ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13787                        arg5, arg6, arg7, arg8);
13788  
13789      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13790          print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13791                            arg3, arg4, arg5, arg6);
13792      }
13793  
13794      record_syscall_return(cpu, num, ret);
13795      return ret;
13796  }
13797