xref: /openbmc/qemu/linux-user/syscall.c (revision 6b99bb046dd36a6dd5525b8f88c2dcddae49222a)
1  /*
2   *  Linux syscalls
3   *
4   *  Copyright (c) 2003 Fabrice Bellard
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
8   *  the Free Software Foundation; either version 2 of the License, or
9   *  (at your option) any later version.
10   *
11   *  This program is distributed in the hope that it will be useful,
12   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   *  GNU General Public License for more details.
15   *
16   *  You should have received a copy of the GNU General Public License
17   *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18   */
19  #define _ATFILE_SOURCE
20  #include "qemu/osdep.h"
21  #include "qemu/cutils.h"
22  #include "qemu/path.h"
23  #include "qemu/memfd.h"
24  #include "qemu/queue.h"
25  #include "qemu/plugin.h"
26  #include "tcg/startup.h"
27  #include "target_mman.h"
28  #include "exec/page-protection.h"
29  #include <elf.h>
30  #include <endian.h>
31  #include <grp.h>
32  #include <sys/ipc.h>
33  #include <sys/msg.h>
34  #include <sys/wait.h>
35  #include <sys/mount.h>
36  #include <sys/file.h>
37  #include <sys/fsuid.h>
38  #include <sys/personality.h>
39  #include <sys/prctl.h>
40  #include <sys/resource.h>
41  #include <sys/swap.h>
42  #include <linux/capability.h>
43  #include <sched.h>
44  #include <sys/timex.h>
45  #include <sys/socket.h>
46  #include <linux/sockios.h>
47  #include <sys/un.h>
48  #include <sys/uio.h>
49  #include <poll.h>
50  #include <sys/times.h>
51  #include <sys/shm.h>
52  #include <sys/sem.h>
53  #include <sys/statfs.h>
54  #include <utime.h>
55  #include <sys/sysinfo.h>
56  #include <sys/signalfd.h>
57  #include <netinet/in.h>
58  #include <netinet/ip.h>
59  #include <netinet/tcp.h>
60  #include <netinet/udp.h>
61  #include <linux/wireless.h>
62  #include <linux/icmp.h>
63  #include <linux/icmpv6.h>
64  #include <linux/if_tun.h>
65  #include <linux/in6.h>
66  #include <linux/errqueue.h>
67  #include <linux/random.h>
68  #ifdef CONFIG_TIMERFD
69  #include <sys/timerfd.h>
70  #endif
71  #ifdef CONFIG_EVENTFD
72  #include <sys/eventfd.h>
73  #endif
74  #ifdef CONFIG_EPOLL
75  #include <sys/epoll.h>
76  #endif
77  #ifdef CONFIG_ATTR
78  #include "qemu/xattr.h"
79  #endif
80  #ifdef CONFIG_SENDFILE
81  #include <sys/sendfile.h>
82  #endif
83  #ifdef HAVE_SYS_KCOV_H
84  #include <sys/kcov.h>
85  #endif
86  
87  #define termios host_termios
88  #define winsize host_winsize
89  #define termio host_termio
90  #define sgttyb host_sgttyb /* same as target */
91  #define tchars host_tchars /* same as target */
92  #define ltchars host_ltchars /* same as target */
93  
94  #include <linux/termios.h>
95  #include <linux/unistd.h>
96  #include <linux/cdrom.h>
97  #include <linux/hdreg.h>
98  #include <linux/soundcard.h>
99  #include <linux/kd.h>
100  #include <linux/mtio.h>
101  #include <linux/fs.h>
102  #include <linux/fd.h>
103  #if defined(CONFIG_FIEMAP)
104  #include <linux/fiemap.h>
105  #endif
106  #include <linux/fb.h>
107  #if defined(CONFIG_USBFS)
108  #include <linux/usbdevice_fs.h>
109  #include <linux/usb/ch9.h>
110  #endif
111  #include <linux/vt.h>
112  #include <linux/dm-ioctl.h>
113  #include <linux/reboot.h>
114  #include <linux/route.h>
115  #include <linux/filter.h>
116  #include <linux/blkpg.h>
117  #include <netpacket/packet.h>
118  #include <linux/netlink.h>
119  #include <linux/if_alg.h>
120  #include <linux/rtc.h>
121  #include <sound/asound.h>
122  #ifdef HAVE_BTRFS_H
123  #include <linux/btrfs.h>
124  #endif
125  #ifdef HAVE_DRM_H
126  #include <libdrm/drm.h>
127  #include <libdrm/i915_drm.h>
128  #endif
129  #include "linux_loop.h"
130  #include "uname.h"
131  
132  #include "qemu.h"
133  #include "user-internals.h"
134  #include "strace.h"
135  #include "signal-common.h"
136  #include "loader.h"
137  #include "user-mmap.h"
138  #include "user/safe-syscall.h"
139  #include "qemu/guest-random.h"
140  #include "qemu/selfmap.h"
141  #include "user/syscall-trace.h"
142  #include "special-errno.h"
143  #include "qapi/error.h"
144  #include "fd-trans.h"
145  #include "cpu_loop-common.h"
146  
147  #ifndef CLONE_IO
148  #define CLONE_IO                0x80000000      /* Clone io context */
149  #endif
150  
151  /* We can't directly call the host clone syscall, because this will
152   * badly confuse libc (breaking mutexes, for example). So we must
153   * divide clone flags into:
154   *  * flag combinations that look like pthread_create()
155   *  * flag combinations that look like fork()
156   *  * flags we can implement within QEMU itself
157   *  * flags we can't support and will return an error for
158   */
159  /* For thread creation, all these flags must be present; for
160   * fork, none must be present.
161   */
162  #define CLONE_THREAD_FLAGS                              \
163      (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164       CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165  
166  /* These flags are ignored:
167   * CLONE_DETACHED is now ignored by the kernel;
168   * CLONE_IO is just an optimisation hint to the I/O scheduler
169   */
170  #define CLONE_IGNORED_FLAGS                     \
171      (CLONE_DETACHED | CLONE_IO)
172  
173  #ifndef CLONE_PIDFD
174  # define CLONE_PIDFD 0x00001000
175  #endif
176  
177  /* Flags for fork which we can implement within QEMU itself */
178  #define CLONE_OPTIONAL_FORK_FLAGS               \
179      (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181  
182  /* Flags for thread creation which we can implement within QEMU itself */
183  #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184      (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185       CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186  
187  #define CLONE_INVALID_FORK_FLAGS                                        \
188      (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189  
190  #define CLONE_INVALID_THREAD_FLAGS                                      \
191      (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192         CLONE_IGNORED_FLAGS))
193  
194  /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195   * have almost all been allocated. We cannot support any of
196   * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197   * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198   * The checks against the invalid thread masks above will catch these.
199   * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200   */
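/*
 * Illustrative sketch (editor's addition, not part of the build): one way
 * the masks above could be used to classify a clone() flags word.  The real
 * checks live in do_fork(); the helper names here are hypothetical.
 */
#if 0
static bool flags_look_like_pthread_create(unsigned int flags)
{
    /* thread creation: all of CLONE_THREAD_FLAGS set, nothing unsupported */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           !(flags & CLONE_INVALID_THREAD_FLAGS);
}

static bool flags_look_like_fork(unsigned int flags)
{
    /* fork: none of CLONE_THREAD_FLAGS set, nothing unsupported */
    return (flags & CLONE_THREAD_FLAGS) == 0 &&
           !(flags & CLONE_INVALID_FORK_FLAGS);
}
#endif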
201  
202  /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203   * once. This exercises the codepaths for restart.
204   */
205  //#define DEBUG_ERESTARTSYS
206  
207  //#include <linux/msdos_fs.h>
208  #define VFAT_IOCTL_READDIR_BOTH \
209      _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210  #define VFAT_IOCTL_READDIR_SHORT \
211      _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212  
213  #undef _syscall0
214  #undef _syscall1
215  #undef _syscall2
216  #undef _syscall3
217  #undef _syscall4
218  #undef _syscall5
219  #undef _syscall6
220  
221  #define _syscall0(type,name)		\
222  static type name (void)			\
223  {					\
224  	return syscall(__NR_##name);	\
225  }
226  
227  #define _syscall1(type,name,type1,arg1)		\
228  static type name (type1 arg1)			\
229  {						\
230  	return syscall(__NR_##name, arg1);	\
231  }
232  
233  #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234  static type name (type1 arg1,type2 arg2)		\
235  {							\
236  	return syscall(__NR_##name, arg1, arg2);	\
237  }
238  
239  #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240  static type name (type1 arg1,type2 arg2,type3 arg3)		\
241  {								\
242  	return syscall(__NR_##name, arg1, arg2, arg3);		\
243  }
244  
245  #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247  {										\
248  	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249  }
250  
251  #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252  		  type5,arg5)							\
253  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254  {										\
255  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256  }
257  
258  
259  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260  		  type5,arg5,type6,arg6)					\
261  static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                    type6 arg6)							\
263  {										\
264  	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265  }
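/*
 * Editor's note: as a concrete example, the expansion of
 * _syscall1(int, exit_group, int, error_code) used further down is simply
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 *
 * i.e. a thin static wrapper around the raw host syscall number.
 */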
266  
267  
268  #define __NR_sys_uname __NR_uname
269  #define __NR_sys_getcwd1 __NR_getcwd
270  #define __NR_sys_getdents __NR_getdents
271  #define __NR_sys_getdents64 __NR_getdents64
272  #define __NR_sys_getpriority __NR_getpriority
273  #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274  #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275  #define __NR_sys_syslog __NR_syslog
276  #if defined(__NR_futex)
277  # define __NR_sys_futex __NR_futex
278  #endif
279  #if defined(__NR_futex_time64)
280  # define __NR_sys_futex_time64 __NR_futex_time64
281  #endif
282  #define __NR_sys_statx __NR_statx
283  
284  #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285  #define __NR__llseek __NR_lseek
286  #endif
287  
288  /* Newer kernel ports have llseek() instead of _llseek() */
289  #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290  #define TARGET_NR__llseek TARGET_NR_llseek
291  #endif
292  
293  /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294  #ifndef TARGET_O_NONBLOCK_MASK
295  #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296  #endif
297  
298  #define __NR_sys_gettid __NR_gettid
299  _syscall0(int, sys_gettid)
300  
301  /* For the 64-bit guest on 32-bit host case we must emulate
302   * getdents using getdents64, because otherwise the host
303   * might hand us back more dirent records than we can fit
304   * into the guest buffer after structure format conversion.
305   * Otherwise we emulate getdents with getdents if the host has it.
306   */
307  #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308  #define EMULATE_GETDENTS_WITH_GETDENTS
309  #endif
310  
311  #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312  _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313  #endif
314  #if (defined(TARGET_NR_getdents) && \
315        !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316      (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317  _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318  #endif
319  #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320  _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321            loff_t *, res, unsigned int, wh);
322  #endif
323  _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324  _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325            siginfo_t *, uinfo)
326  _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327  #ifdef __NR_exit_group
328  _syscall1(int,exit_group,int,error_code)
329  #endif
330  #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331  #define __NR_sys_close_range __NR_close_range
332  _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333  #ifndef CLOSE_RANGE_CLOEXEC
334  #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335  #endif
336  #endif
337  #if defined(__NR_futex)
338  _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339            const struct timespec *,timeout,int *,uaddr2,int,val3)
340  #endif
341  #if defined(__NR_futex_time64)
342  _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343            const struct timespec *,timeout,int *,uaddr2,int,val3)
344  #endif
345  #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346  _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347  #endif
348  #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349  _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                               unsigned int, flags);
351  #endif
352  #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353  _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354  #endif
355  #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356  _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357            unsigned long *, user_mask_ptr);
358  #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359  _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360            unsigned long *, user_mask_ptr);
361  /* sched_attr is not defined in glibc */
362  struct sched_attr {
363      uint32_t size;
364      uint32_t sched_policy;
365      uint64_t sched_flags;
366      int32_t sched_nice;
367      uint32_t sched_priority;
368      uint64_t sched_runtime;
369      uint64_t sched_deadline;
370      uint64_t sched_period;
371      uint32_t sched_util_min;
372      uint32_t sched_util_max;
373  };
374  #define __NR_sys_sched_getattr __NR_sched_getattr
375  _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376            unsigned int, size, unsigned int, flags);
377  #define __NR_sys_sched_setattr __NR_sched_setattr
378  _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379            unsigned int, flags);
380  #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381  _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382  #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383  _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384            const struct sched_param *, param);
385  #define __NR_sys_sched_getparam __NR_sched_getparam
386  _syscall2(int, sys_sched_getparam, pid_t, pid,
387            struct sched_param *, param);
388  #define __NR_sys_sched_setparam __NR_sched_setparam
389  _syscall2(int, sys_sched_setparam, pid_t, pid,
390            const struct sched_param *, param);
391  #define __NR_sys_getcpu __NR_getcpu
392  _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393  _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394            void *, arg);
395  _syscall2(int, capget, struct __user_cap_header_struct *, header,
396            struct __user_cap_data_struct *, data);
397  _syscall2(int, capset, struct __user_cap_header_struct *, header,
398            struct __user_cap_data_struct *, data);
399  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400  _syscall2(int, ioprio_get, int, which, int, who)
401  #endif
402  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403  _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404  #endif
405  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406  _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407  #endif
408  
409  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410  _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411            unsigned long, idx1, unsigned long, idx2)
412  #endif
413  
414  /*
415   * It is assumed that struct statx is architecture independent.
416   */
417  #if defined(TARGET_NR_statx) && defined(__NR_statx)
418  _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419            unsigned int, mask, struct target_statx *, statxbuf)
420  #endif
421  #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422  _syscall2(int, membarrier, int, cmd, int, flags)
423  #endif
424  
425  static const bitmask_transtbl fcntl_flags_tbl[] = {
426    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439  #if defined(O_DIRECT)
440    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441  #endif
442  #if defined(O_NOATIME)
443    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444  #endif
445  #if defined(O_CLOEXEC)
446    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447  #endif
448  #if defined(O_PATH)
449    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450  #endif
451  #if defined(O_TMPFILE)
452    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453  #endif
454    /* Don't terminate the list prematurely on 64-bit host+guest.  */
455  #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457  #endif
458  };
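/*
 * Editor's note: each bitmask_transtbl entry above reads
 * { target_mask, target_bits, host_mask, host_bits }; where the target flag
 * word matches target_bits under target_mask, the corresponding host_bits
 * are substituted (and symmetrically for the host-to-target direction).
 * The table is consumed by the target_to_host_bitmask() /
 * host_to_target_bitmask() helpers when converting open()/fcntl() flags.
 */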
459  
460  _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461  
462  #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463  #if defined(__NR_utimensat)
464  #define __NR_sys_utimensat __NR_utimensat
465  _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466            const struct timespec *,tsp,int,flags)
467  #else
468  static int sys_utimensat(int dirfd, const char *pathname,
469                           const struct timespec times[2], int flags)
470  {
471      errno = ENOSYS;
472      return -1;
473  }
474  #endif
475  #endif /* TARGET_NR_utimensat */
476  
477  #ifdef TARGET_NR_renameat2
478  #if defined(__NR_renameat2)
479  #define __NR_sys_renameat2 __NR_renameat2
480  _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481            const char *, new, unsigned int, flags)
482  #else
483  static int sys_renameat2(int oldfd, const char *old,
484                           int newfd, const char *new, int flags)
485  {
486      if (flags == 0) {
487          return renameat(oldfd, old, newfd, new);
488      }
489      errno = ENOSYS;
490      return -1;
491  }
492  #endif
493  #endif /* TARGET_NR_renameat2 */
494  
495  #ifdef CONFIG_INOTIFY
496  #include <sys/inotify.h>
497  #else
498  /* Userspace can usually survive runtime without inotify */
499  #undef TARGET_NR_inotify_init
500  #undef TARGET_NR_inotify_init1
501  #undef TARGET_NR_inotify_add_watch
502  #undef TARGET_NR_inotify_rm_watch
503  #endif /* CONFIG_INOTIFY  */
504  
505  #if defined(TARGET_NR_prlimit64)
506  #ifndef __NR_prlimit64
507  # define __NR_prlimit64 -1
508  #endif
509  #define __NR_sys_prlimit64 __NR_prlimit64
510  /* The glibc rlimit structure may not be that used by the underlying syscall */
511  struct host_rlimit64 {
512      uint64_t rlim_cur;
513      uint64_t rlim_max;
514  };
515  _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516            const struct host_rlimit64 *, new_limit,
517            struct host_rlimit64 *, old_limit)
518  #endif
519  
520  
521  #if defined(TARGET_NR_timer_create)
522  /* Maximum of 32 active POSIX timers allowed at any one time. */
523  #define GUEST_TIMER_MAX 32
524  static timer_t g_posix_timers[GUEST_TIMER_MAX];
525  static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526  
527  static inline int next_free_host_timer(void)
528  {
529      int k;
530      for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531          if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532              return k;
533          }
534      }
535      return -1;
536  }
537  
538  static inline void free_host_timer_slot(int id)
539  {
540      qatomic_store_release(g_posix_timer_allocated + id, 0);
541  }
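/*
 * Editor's note: next_free_host_timer() and free_host_timer_slot() form a
 * small lock-free slot allocator.  qatomic_xchg() atomically sets a slot to
 * 1, and only the caller that observed the previous value 0 owns it;
 * qatomic_store_release() publishes the slot as free again.
 */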
542  #endif
543  
544  static inline int host_to_target_errno(int host_errno)
545  {
546      switch (host_errno) {
547  #define E(X)  case X: return TARGET_##X;
548  #include "errnos.c.inc"
549  #undef E
550      default:
551          return host_errno;
552      }
553  }
554  
555  static inline int target_to_host_errno(int target_errno)
556  {
557      switch (target_errno) {
558  #define E(X)  case TARGET_##X: return X;
559  #include "errnos.c.inc"
560  #undef E
561      default:
562          return target_errno;
563      }
564  }
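/*
 * Editor's note: each E(X) entry pulled in from errnos.c.inc expands to a
 * case of the form "case X: return TARGET_X;" (and the mirror image in
 * target_to_host_errno()), so unknown errno values simply pass through
 * unchanged via the default case.
 */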
565  
566  abi_long get_errno(abi_long ret)
567  {
568      if (ret == -1)
569          return -host_to_target_errno(errno);
570      else
571          return ret;
572  }
573  
574  const char *target_strerror(int err)
575  {
576      if (err == QEMU_ERESTARTSYS) {
577          return "To be restarted";
578      }
579      if (err == QEMU_ESIGRETURN) {
580          return "Successful exit from sigreturn";
581      }
582  
583      return strerror(target_to_host_errno(err));
584  }
585  
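/*
 * Editor's note: check_zeroed_user() returns 1 when every guest byte beyond
 * ksize is zero (or when there are no such bytes), 0 when a non-zero byte
 * is found, and -TARGET_EFAULT when the guest memory cannot be read.
 */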
586  static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
587  {
588      int i;
589      uint8_t b;
590      if (usize <= ksize) {
591          return 1;
592      }
593      for (i = ksize; i < usize; i++) {
594          if (get_user_u8(b, addr + i)) {
595              return -TARGET_EFAULT;
596          }
597          if (b != 0) {
598              return 0;
599          }
600      }
601      return 1;
602  }
603  
604  /*
605   * Copies a target struct to a host struct, in a way that guarantees
606   * backwards-compatibility for struct syscall arguments.
607   *
608   * Similar to the kernel's uaccess.h:copy_struct_from_user()
609   */
610  int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
611  {
612      size_t size = MIN(ksize, usize);
613      size_t rest = MAX(ksize, usize) - size;
614  
615      /* Deal with trailing bytes. */
616      if (usize < ksize) {
617          memset(dst + size, 0, rest);
618      } else if (usize > ksize) {
619          int ret = check_zeroed_user(src, ksize, usize);
620          if (ret <= 0) {
621              return ret ?: -TARGET_E2BIG;
622          }
623      }
624      /* Copy the interoperable parts of the struct. */
625      if (copy_from_user(dst, src, size)) {
626          return -TARGET_EFAULT;
627      }
628      return 0;
629  }
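/*
 * Worked example (editor's addition): for a hypothetical guest_addr/usize
 * pair describing a struct sched_attr style argument,
 *
 *     usize == ksize: plain copy of the whole struct;
 *     usize <  ksize: the copied prefix is padded with zeroes in dst;
 *     usize >  ksize: the extra guest bytes must all be zero, otherwise
 *                     -TARGET_E2BIG is returned to the guest.
 */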
630  
631  #define safe_syscall0(type, name) \
632  static type safe_##name(void) \
633  { \
634      return safe_syscall(__NR_##name); \
635  }
636  
637  #define safe_syscall1(type, name, type1, arg1) \
638  static type safe_##name(type1 arg1) \
639  { \
640      return safe_syscall(__NR_##name, arg1); \
641  }
642  
643  #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
644  static type safe_##name(type1 arg1, type2 arg2) \
645  { \
646      return safe_syscall(__NR_##name, arg1, arg2); \
647  }
648  
649  #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
650  static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
651  { \
652      return safe_syscall(__NR_##name, arg1, arg2, arg3); \
653  }
654  
655  #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
656      type4, arg4) \
657  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
658  { \
659      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
660  }
661  
662  #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
663      type4, arg4, type5, arg5) \
664  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
665      type5 arg5) \
666  { \
667      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
668  }
669  
670  #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
671      type4, arg4, type5, arg5, type6, arg6) \
672  static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
673      type5 arg5, type6 arg6) \
674  { \
675      return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
676  }
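/*
 * Editor's note: unlike the plain _syscallN() wrappers above, these
 * safe_syscallN() wrappers go through safe_syscall() (see
 * user/safe-syscall.h), which cooperates with the signal handling code so
 * that a guest signal arriving around syscall entry causes the syscall to
 * be restarted (QEMU_ERESTARTSYS handling) instead of being lost.
 */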
677  
678  safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
679  safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
680  safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
681                int, flags, mode_t, mode)
682  
683  safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
684                const struct open_how_ver0 *, how, size_t, size)
685  
686  #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
687  safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
688                struct rusage *, rusage)
689  #endif
690  safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
691                int, options, struct rusage *, rusage)
692  safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
693  safe_syscall5(int, execveat, int, dirfd, const char *, filename,
694                char **, argv, char **, envp, int, flags)
695  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
696      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
697  safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
698                fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
699  #endif
700  #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
701  safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
702                struct timespec *, tsp, const sigset_t *, sigmask,
703                size_t, sigsetsize)
704  #endif
705  safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
706                int, maxevents, int, timeout, const sigset_t *, sigmask,
707                size_t, sigsetsize)
708  #if defined(__NR_futex)
709  safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710                const struct timespec *,timeout,int *,uaddr2,int,val3)
711  #endif
712  #if defined(__NR_futex_time64)
713  safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
714                const struct timespec *,timeout,int *,uaddr2,int,val3)
715  #endif
716  safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
717  safe_syscall2(int, kill, pid_t, pid, int, sig)
718  safe_syscall2(int, tkill, int, tid, int, sig)
719  safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
720  safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
721  safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
722  safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
723                unsigned long, pos_l, unsigned long, pos_h)
724  safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
725                unsigned long, pos_l, unsigned long, pos_h)
726  safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
727                socklen_t, addrlen)
728  safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
729                int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
730  safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
731                int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
732  safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
733  safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
734  safe_syscall2(int, flock, int, fd, int, operation)
735  #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
736  safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
737                const struct timespec *, uts, size_t, sigsetsize)
738  #endif
739  safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
740                int, flags)
741  #if defined(TARGET_NR_nanosleep)
742  safe_syscall2(int, nanosleep, const struct timespec *, req,
743                struct timespec *, rem)
744  #endif
745  #if defined(TARGET_NR_clock_nanosleep) || \
746      defined(TARGET_NR_clock_nanosleep_time64)
747  safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
748                const struct timespec *, req, struct timespec *, rem)
749  #endif
750  #ifdef __NR_ipc
751  #ifdef __s390x__
752  safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
753                void *, ptr)
754  #else
755  safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
756                void *, ptr, long, fifth)
757  #endif
758  #endif
759  #ifdef __NR_msgsnd
760  safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
761                int, flags)
762  #endif
763  #ifdef __NR_msgrcv
764  safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
765                long, msgtype, int, flags)
766  #endif
767  #ifdef __NR_semtimedop
768  safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
769                unsigned, nsops, const struct timespec *, timeout)
770  #endif
771  #if defined(TARGET_NR_mq_timedsend) || \
772      defined(TARGET_NR_mq_timedsend_time64)
773  safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
774                size_t, len, unsigned, prio, const struct timespec *, timeout)
775  #endif
776  #if defined(TARGET_NR_mq_timedreceive) || \
777      defined(TARGET_NR_mq_timedreceive_time64)
778  safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
779                size_t, len, unsigned *, prio, const struct timespec *, timeout)
780  #endif
781  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
782  safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
783                int, outfd, loff_t *, poutoff, size_t, length,
784                unsigned int, flags)
785  #endif
786  
787  /* We do ioctl like this rather than via safe_syscall3 to preserve the
788   * "third argument might be integer or pointer or not present" behaviour of
789   * the libc function.
790   */
791  #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
792  /* Similarly for fcntl. Since we always build with LFS enabled,
793   * we should be using the 64-bit structures automatically.
794   */
795  #ifdef __NR_fcntl64
796  #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
797  #else
798  #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
799  #endif
800  
801  static inline int host_to_target_sock_type(int host_type)
802  {
803      int target_type;
804  
805      switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
806      case SOCK_DGRAM:
807          target_type = TARGET_SOCK_DGRAM;
808          break;
809      case SOCK_STREAM:
810          target_type = TARGET_SOCK_STREAM;
811          break;
812      default:
813          target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
814          break;
815      }
816  
817  #if defined(SOCK_CLOEXEC)
818      if (host_type & SOCK_CLOEXEC) {
819          target_type |= TARGET_SOCK_CLOEXEC;
820      }
821  #endif
822  
823  #if defined(SOCK_NONBLOCK)
824      if (host_type & SOCK_NONBLOCK) {
825          target_type |= TARGET_SOCK_NONBLOCK;
826      }
827  #endif
828  
829      return target_type;
830  }
831  
832  static abi_ulong target_brk, initial_target_brk;
833  
834  void target_set_brk(abi_ulong new_brk)
835  {
836      target_brk = TARGET_PAGE_ALIGN(new_brk);
837      initial_target_brk = target_brk;
838  }
839  
840  /* do_brk() must return target values and target errnos. */
841  abi_long do_brk(abi_ulong brk_val)
842  {
843      abi_long mapped_addr;
844      abi_ulong new_brk;
845      abi_ulong old_brk;
846  
847      /* brk pointers are always untagged */
848  
849      /* do not allow the break to shrink below the initial brk value */
850      if (brk_val < initial_target_brk) {
851          return target_brk;
852      }
853  
854      new_brk = TARGET_PAGE_ALIGN(brk_val);
855      old_brk = TARGET_PAGE_ALIGN(target_brk);
856  
857      /* new and old target_brk might be on the same page */
858      if (new_brk == old_brk) {
859          target_brk = brk_val;
860          return target_brk;
861      }
862  
863      /* Release heap if necessary */
864      if (new_brk < old_brk) {
865          target_munmap(new_brk, old_brk - new_brk);
866  
867          target_brk = brk_val;
868          return target_brk;
869      }
870  
871      mapped_addr = target_mmap(old_brk, new_brk - old_brk,
872                                PROT_READ | PROT_WRITE,
873                                MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
874                                -1, 0);
875  
876      if (mapped_addr == old_brk) {
877          target_brk = brk_val;
878          return target_brk;
879      }
880  
881  #if defined(TARGET_ALPHA)
882      /* We (partially) emulate OSF/1 on Alpha, which requires we
883         return a proper errno, not an unchanged brk value.  */
884      return -TARGET_ENOMEM;
885  #endif
886      /* For everything else, return the previous break. */
887      return target_brk;
888  }
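/*
 * Editor's note: the heap is grown with MAP_FIXED_NOREPLACE so that an
 * existing mapping in the way makes target_mmap() fail instead of being
 * silently replaced; in that case the old break (or -TARGET_ENOMEM for the
 * OSF/1-on-Alpha case above) is returned to the guest.
 */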
889  
890  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891      defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
892  static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                              abi_ulong target_fds_addr,
894                                              int n)
895  {
896      int i, nw, j, k;
897      abi_ulong b, *target_fds;
898  
899      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900      if (!(target_fds = lock_user(VERIFY_READ,
901                                   target_fds_addr,
902                                   sizeof(abi_ulong) * nw,
903                                   1)))
904          return -TARGET_EFAULT;
905  
906      FD_ZERO(fds);
907      k = 0;
908      for (i = 0; i < nw; i++) {
909          /* grab the abi_ulong */
910          __get_user(b, &target_fds[i]);
911          for (j = 0; j < TARGET_ABI_BITS; j++) {
912              /* check the bit inside the abi_ulong */
913              if ((b >> j) & 1)
914                  FD_SET(k, fds);
915              k++;
916          }
917      }
918  
919      unlock_user(target_fds, target_fds_addr, 0);
920  
921      return 0;
922  }
923  
924  static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                   abi_ulong target_fds_addr,
926                                                   int n)
927  {
928      if (target_fds_addr) {
929          if (copy_from_user_fdset(fds, target_fds_addr, n))
930              return -TARGET_EFAULT;
931          *fds_ptr = fds;
932      } else {
933          *fds_ptr = NULL;
934      }
935      return 0;
936  }
937  
938  static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                            const fd_set *fds,
940                                            int n)
941  {
942      int i, nw, j, k;
943      abi_long v;
944      abi_ulong *target_fds;
945  
946      nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947      if (!(target_fds = lock_user(VERIFY_WRITE,
948                                   target_fds_addr,
949                                   sizeof(abi_ulong) * nw,
950                                   0)))
951          return -TARGET_EFAULT;
952  
953      k = 0;
954      for (i = 0; i < nw; i++) {
955          v = 0;
956          for (j = 0; j < TARGET_ABI_BITS; j++) {
957              v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958              k++;
959          }
960          __put_user(v, &target_fds[i]);
961      }
962  
963      unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964  
965      return 0;
966  }
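/*
 * Worked example (editor's addition): with TARGET_ABI_BITS == 32 and
 * n == 40, nw = DIV_ROUND_UP(40, 32) = 2 abi_ulongs are transferred, and
 * guest fd 33 corresponds to bit j = 1 of word i = 1, i.e. bit (33 % 32)
 * of target_fds[33 / 32].
 */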
967  #endif
968  
969  #if defined(__alpha__)
970  #define HOST_HZ 1024
971  #else
972  #define HOST_HZ 100
973  #endif
974  
975  static inline abi_long host_to_target_clock_t(long ticks)
976  {
977  #if HOST_HZ == TARGET_HZ
978      return ticks;
979  #else
980      return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981  #endif
982  }
983  
984  static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                               const struct rusage *rusage)
986  {
987      struct target_rusage *target_rusage;
988  
989      if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990          return -TARGET_EFAULT;
991      target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992      target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993      target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994      target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995      target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996      target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997      target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998      target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999      target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000      target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001      target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002      target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003      target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004      target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005      target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006      target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007      target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008      target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009      unlock_user_struct(target_rusage, target_addr, 1);
1010  
1011      return 0;
1012  }
1013  
1014  #ifdef TARGET_NR_setrlimit
1015  static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016  {
1017      abi_ulong target_rlim_swap;
1018      rlim_t result;
1019  
1020      target_rlim_swap = tswapal(target_rlim);
1021      if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022          return RLIM_INFINITY;
1023  
1024      result = target_rlim_swap;
1025      if (target_rlim_swap != (rlim_t)result)
1026          return RLIM_INFINITY;
1027  
1028      return result;
1029  }
1030  #endif
1031  
1032  #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033  static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034  {
1035      abi_ulong target_rlim_swap;
1036      abi_ulong result;
1037  
1038      if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039          target_rlim_swap = TARGET_RLIM_INFINITY;
1040      else
1041          target_rlim_swap = rlim;
1042      result = tswapal(target_rlim_swap);
1043  
1044      return result;
1045  }
1046  #endif
1047  
1048  static inline int target_to_host_resource(int code)
1049  {
1050      switch (code) {
1051      case TARGET_RLIMIT_AS:
1052          return RLIMIT_AS;
1053      case TARGET_RLIMIT_CORE:
1054          return RLIMIT_CORE;
1055      case TARGET_RLIMIT_CPU:
1056          return RLIMIT_CPU;
1057      case TARGET_RLIMIT_DATA:
1058          return RLIMIT_DATA;
1059      case TARGET_RLIMIT_FSIZE:
1060          return RLIMIT_FSIZE;
1061      case TARGET_RLIMIT_LOCKS:
1062          return RLIMIT_LOCKS;
1063      case TARGET_RLIMIT_MEMLOCK:
1064          return RLIMIT_MEMLOCK;
1065      case TARGET_RLIMIT_MSGQUEUE:
1066          return RLIMIT_MSGQUEUE;
1067      case TARGET_RLIMIT_NICE:
1068          return RLIMIT_NICE;
1069      case TARGET_RLIMIT_NOFILE:
1070          return RLIMIT_NOFILE;
1071      case TARGET_RLIMIT_NPROC:
1072          return RLIMIT_NPROC;
1073      case TARGET_RLIMIT_RSS:
1074          return RLIMIT_RSS;
1075      case TARGET_RLIMIT_RTPRIO:
1076          return RLIMIT_RTPRIO;
1077  #ifdef RLIMIT_RTTIME
1078      case TARGET_RLIMIT_RTTIME:
1079          return RLIMIT_RTTIME;
1080  #endif
1081      case TARGET_RLIMIT_SIGPENDING:
1082          return RLIMIT_SIGPENDING;
1083      case TARGET_RLIMIT_STACK:
1084          return RLIMIT_STACK;
1085      default:
1086          return code;
1087      }
1088  }
1089  
1090  static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                                abi_ulong target_tv_addr)
1092  {
1093      struct target_timeval *target_tv;
1094  
1095      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096          return -TARGET_EFAULT;
1097      }
1098  
1099      __get_user(tv->tv_sec, &target_tv->tv_sec);
1100      __get_user(tv->tv_usec, &target_tv->tv_usec);
1101  
1102      unlock_user_struct(target_tv, target_tv_addr, 0);
1103  
1104      return 0;
1105  }
1106  
1107  static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                              const struct timeval *tv)
1109  {
1110      struct target_timeval *target_tv;
1111  
1112      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113          return -TARGET_EFAULT;
1114      }
1115  
1116      __put_user(tv->tv_sec, &target_tv->tv_sec);
1117      __put_user(tv->tv_usec, &target_tv->tv_usec);
1118  
1119      unlock_user_struct(target_tv, target_tv_addr, 1);
1120  
1121      return 0;
1122  }
1123  
1124  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125  static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                  abi_ulong target_tv_addr)
1127  {
1128      struct target__kernel_sock_timeval *target_tv;
1129  
1130      if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131          return -TARGET_EFAULT;
1132      }
1133  
1134      __get_user(tv->tv_sec, &target_tv->tv_sec);
1135      __get_user(tv->tv_usec, &target_tv->tv_usec);
1136  
1137      unlock_user_struct(target_tv, target_tv_addr, 0);
1138  
1139      return 0;
1140  }
1141  #endif
1142  
1143  static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                                const struct timeval *tv)
1145  {
1146      struct target__kernel_sock_timeval *target_tv;
1147  
1148      if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149          return -TARGET_EFAULT;
1150      }
1151  
1152      __put_user(tv->tv_sec, &target_tv->tv_sec);
1153      __put_user(tv->tv_usec, &target_tv->tv_usec);
1154  
1155      unlock_user_struct(target_tv, target_tv_addr, 1);
1156  
1157      return 0;
1158  }
1159  
1160  #if defined(TARGET_NR_futex) || \
1161      defined(TARGET_NR_rt_sigtimedwait) || \
1162      defined(TARGET_NR_pselect6) || \
1163      defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164      defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165      defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166      defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167      defined(TARGET_NR_timer_settime) || \
1168      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169  static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                 abi_ulong target_addr)
1171  {
1172      struct target_timespec *target_ts;
1173  
1174      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175          return -TARGET_EFAULT;
1176      }
1177      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179      unlock_user_struct(target_ts, target_addr, 0);
1180      return 0;
1181  }
1182  #endif
1183  
1184  #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185      defined(TARGET_NR_timer_settime64) || \
1186      defined(TARGET_NR_mq_timedsend_time64) || \
1187      defined(TARGET_NR_mq_timedreceive_time64) || \
1188      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189      defined(TARGET_NR_clock_nanosleep_time64) || \
1190      defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191      defined(TARGET_NR_utimensat) || \
1192      defined(TARGET_NR_utimensat_time64) || \
1193      defined(TARGET_NR_semtimedop_time64) || \
1194      defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195  static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                   abi_ulong target_addr)
1197  {
1198      struct target__kernel_timespec *target_ts;
1199  
1200      if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201          return -TARGET_EFAULT;
1202      }
1203      __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204      __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205      /* in 32bit mode, this drops the padding */
1206      host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207      unlock_user_struct(target_ts, target_addr, 0);
1208      return 0;
1209  }
1210  #endif
1211  
1212  static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                 struct timespec *host_ts)
1214  {
1215      struct target_timespec *target_ts;
1216  
1217      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218          return -TARGET_EFAULT;
1219      }
1220      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222      unlock_user_struct(target_ts, target_addr, 1);
1223      return 0;
1224  }
1225  
1226  static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                   struct timespec *host_ts)
1228  {
1229      struct target__kernel_timespec *target_ts;
1230  
1231      if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232          return -TARGET_EFAULT;
1233      }
1234      __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235      __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236      unlock_user_struct(target_ts, target_addr, 1);
1237      return 0;
1238  }
1239  
1240  #if defined(TARGET_NR_gettimeofday)
1241  static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                               struct timezone *tz)
1243  {
1244      struct target_timezone *target_tz;
1245  
1246      if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247          return -TARGET_EFAULT;
1248      }
1249  
1250      __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251      __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252  
1253      unlock_user_struct(target_tz, target_tz_addr, 1);
1254  
1255      return 0;
1256  }
1257  #endif
1258  
1259  #if defined(TARGET_NR_settimeofday)
1260  static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                 abi_ulong target_tz_addr)
1262  {
1263      struct target_timezone *target_tz;
1264  
1265      if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266          return -TARGET_EFAULT;
1267      }
1268  
1269      __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270      __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271  
1272      unlock_user_struct(target_tz, target_tz_addr, 0);
1273  
1274      return 0;
1275  }
1276  #endif
1277  
1278  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279  #include <mqueue.h>
1280  
1281  static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                                abi_ulong target_mq_attr_addr)
1283  {
1284      struct target_mq_attr *target_mq_attr;
1285  
1286      if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                            target_mq_attr_addr, 1))
1288          return -TARGET_EFAULT;
1289  
1290      __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291      __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292      __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293      __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294  
1295      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296  
1297      return 0;
1298  }
1299  
1300  static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                              const struct mq_attr *attr)
1302  {
1303      struct target_mq_attr *target_mq_attr;
1304  
1305      if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                            target_mq_attr_addr, 0))
1307          return -TARGET_EFAULT;
1308  
1309      __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310      __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311      __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312      __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313  
1314      unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315  
1316      return 0;
1317  }
1318  #endif
1319  
1320  #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321  /* do_select() must return target values and target errnos. */
1322  static abi_long do_select(int n,
1323                            abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                            abi_ulong efd_addr, abi_ulong target_tv_addr)
1325  {
1326      fd_set rfds, wfds, efds;
1327      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328      struct timeval tv;
1329      struct timespec ts, *ts_ptr;
1330      abi_long ret;
1331  
1332      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333      if (ret) {
1334          return ret;
1335      }
1336      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337      if (ret) {
1338          return ret;
1339      }
1340      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341      if (ret) {
1342          return ret;
1343      }
1344  
1345      if (target_tv_addr) {
1346          if (copy_from_user_timeval(&tv, target_tv_addr))
1347              return -TARGET_EFAULT;
1348          ts.tv_sec = tv.tv_sec;
1349          ts.tv_nsec = tv.tv_usec * 1000;
1350          ts_ptr = &ts;
1351      } else {
1352          ts_ptr = NULL;
1353      }
1354  
1355      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                    ts_ptr, NULL));
1357  
1358      if (!is_error(ret)) {
1359          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360              return -TARGET_EFAULT;
1361          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362              return -TARGET_EFAULT;
1363          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364              return -TARGET_EFAULT;
1365  
1366          if (target_tv_addr) {
1367              tv.tv_sec = ts.tv_sec;
1368              tv.tv_usec = ts.tv_nsec / 1000;
1369              if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                  return -TARGET_EFAULT;
1371              }
1372          }
1373      }
1374  
1375      return ret;
1376  }
1377  
1378  #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379  static abi_long do_old_select(abi_ulong arg1)
1380  {
1381      struct target_sel_arg_struct *sel;
1382      abi_ulong inp, outp, exp, tvp;
1383      long nsel;
1384  
1385      if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386          return -TARGET_EFAULT;
1387      }
1388  
1389      nsel = tswapal(sel->n);
1390      inp = tswapal(sel->inp);
1391      outp = tswapal(sel->outp);
1392      exp = tswapal(sel->exp);
1393      tvp = tswapal(sel->tvp);
1394  
1395      unlock_user_struct(sel, arg1, 0);
1396  
1397      return do_select(nsel, inp, outp, exp, tvp);
1398  }
1399  #endif
1400  #endif
1401  
1402  #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403  static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                              abi_long arg4, abi_long arg5, abi_long arg6,
1405                              bool time64)
1406  {
1407      abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408      fd_set rfds, wfds, efds;
1409      fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410      struct timespec ts, *ts_ptr;
1411      abi_long ret;
1412  
1413      /*
1414       * The 6th arg is actually two args smashed together,
1415       * so we cannot use the C library.
1416       */
1417      struct {
1418          sigset_t *set;
1419          size_t size;
1420      } sig, *sig_ptr;
1421  
1422      abi_ulong arg_sigset, arg_sigsize, *arg7;
1423  
1424      n = arg1;
1425      rfd_addr = arg2;
1426      wfd_addr = arg3;
1427      efd_addr = arg4;
1428      ts_addr = arg5;
1429  
1430      ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431      if (ret) {
1432          return ret;
1433      }
1434      ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435      if (ret) {
1436          return ret;
1437      }
1438      ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439      if (ret) {
1440          return ret;
1441      }
1442  
1443      /*
1444       * This takes a timespec, and not a timeval, so we cannot
1445       * use the do_select() helper ...
1446       */
1447      if (ts_addr) {
1448          if (time64) {
1449              if (target_to_host_timespec64(&ts, ts_addr)) {
1450                  return -TARGET_EFAULT;
1451              }
1452          } else {
1453              if (target_to_host_timespec(&ts, ts_addr)) {
1454                  return -TARGET_EFAULT;
1455              }
1456          }
1457          ts_ptr = &ts;
1458      } else {
1459          ts_ptr = NULL;
1460      }
1461  
1462      /* Extract the two packed args for the sigset */
1463      sig_ptr = NULL;
1464      if (arg6) {
1465          arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466          if (!arg7) {
1467              return -TARGET_EFAULT;
1468          }
1469          arg_sigset = tswapal(arg7[0]);
1470          arg_sigsize = tswapal(arg7[1]);
1471          unlock_user(arg7, arg6, 0);
1472  
1473          if (arg_sigset) {
1474              ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475              if (ret != 0) {
1476                  return ret;
1477              }
1478              sig_ptr = &sig;
1479              sig.size = SIGSET_T_SIZE;
1480          }
1481      }
1482  
1483      ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                    ts_ptr, sig_ptr));
1485  
1486      if (sig_ptr) {
1487          finish_sigsuspend_mask(ret);
1488      }
1489  
1490      if (!is_error(ret)) {
1491          if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492              return -TARGET_EFAULT;
1493          }
1494          if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495              return -TARGET_EFAULT;
1496          }
1497          if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498              return -TARGET_EFAULT;
1499          }
1500          if (time64) {
1501              if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                  return -TARGET_EFAULT;
1503              }
1504          } else {
1505              if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                  return -TARGET_EFAULT;
1507              }
1508          }
1509      }
1510      return ret;
1511  }
1512  #endif
1513  
1514  #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515      defined(TARGET_NR_ppoll_time64)
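/*
 * Common handler for poll(), ppoll() and ppoll_time64(). 'ppoll'
 * distinguishes the ppoll variants (timespec timeout plus optional
 * signal mask) from plain poll(); 'time64' selects the 64-bit
 * timespec layout.
 */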
1516  static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                           abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518  {
1519      struct target_pollfd *target_pfd;
1520      unsigned int nfds = arg2;
1521      struct pollfd *pfd;
1522      unsigned int i;
1523      abi_long ret;
1524  
1525      pfd = NULL;
1526      target_pfd = NULL;
1527      if (nfds) {
1528          if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529              return -TARGET_EINVAL;
1530          }
1531          target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                 sizeof(struct target_pollfd) * nfds, 1);
1533          if (!target_pfd) {
1534              return -TARGET_EFAULT;
1535          }
1536  
1537          pfd = alloca(sizeof(struct pollfd) * nfds);
1538          for (i = 0; i < nfds; i++) {
1539              pfd[i].fd = tswap32(target_pfd[i].fd);
1540              pfd[i].events = tswap16(target_pfd[i].events);
1541          }
1542      }
1543      if (ppoll) {
1544          struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545          sigset_t *set = NULL;
1546  
1547          if (arg3) {
1548              if (time64) {
1549                  if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                      unlock_user(target_pfd, arg1, 0);
1551                      return -TARGET_EFAULT;
1552                  }
1553              } else {
1554                  if (target_to_host_timespec(timeout_ts, arg3)) {
1555                      unlock_user(target_pfd, arg1, 0);
1556                      return -TARGET_EFAULT;
1557                  }
1558              }
1559          } else {
1560              timeout_ts = NULL;
1561          }
1562  
1563          if (arg4) {
1564              ret = process_sigsuspend_mask(&set, arg4, arg5);
1565              if (ret != 0) {
1566                  unlock_user(target_pfd, arg1, 0);
1567                  return ret;
1568              }
1569          }
1570  
1571          ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                     set, SIGSET_T_SIZE));
1573  
1574          if (set) {
1575              finish_sigsuspend_mask(ret);
1576          }
1577          if (!is_error(ret) && arg3) {
1578              if (time64) {
1579                  if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                      return -TARGET_EFAULT;
1581                  }
1582              } else {
1583                  if (host_to_target_timespec(arg3, timeout_ts)) {
1584                      return -TARGET_EFAULT;
1585                  }
1586              }
1587          }
1588      } else {
1589          struct timespec ts, *pts;
1590
1591          if (arg3 >= 0) {
1592              /* Convert ms to secs, ns */
1593              ts.tv_sec = arg3 / 1000;
1594              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595              pts = &ts;
1596          } else {
1597              /* A negative poll() timeout means "infinite" */
1598              pts = NULL;
1599          }
1600          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601      }
1602  
1603      if (!is_error(ret)) {
1604          for (i = 0; i < nfds; i++) {
1605              target_pfd[i].revents = tswap16(pfd[i].revents);
1606          }
1607      }
1608      unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609      return ret;
1610  }
1611  #endif
1612  
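/* Common handler for the pipe() and pipe2() syscalls. */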
1613  static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                          int flags, int is_pipe2)
1615  {
1616      int host_pipe[2];
1617      abi_long ret;
1618      ret = pipe2(host_pipe, flags);
1619  
1620      if (is_error(ret))
1621          return get_errno(ret);
1622  
1623      /* Several targets have special calling conventions for the original
1624         pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1625      if (!is_pipe2) {
1626  #if defined(TARGET_ALPHA)
1627          cpu_env->ir[IR_A4] = host_pipe[1];
1628          return host_pipe[0];
1629  #elif defined(TARGET_MIPS)
1630          cpu_env->active_tc.gpr[3] = host_pipe[1];
1631          return host_pipe[0];
1632  #elif defined(TARGET_SH4)
1633          cpu_env->gregs[1] = host_pipe[1];
1634          return host_pipe[0];
1635  #elif defined(TARGET_SPARC)
1636          cpu_env->regwptr[1] = host_pipe[1];
1637          return host_pipe[0];
1638  #endif
1639      }
1640  
1641      if (put_user_s32(host_pipe[0], pipedes)
1642          || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643          return -TARGET_EFAULT;
1644      return get_errno(ret);
1645  }
1646  
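/*
 * Copy a socket address from guest memory at target_addr into the host
 * buffer 'addr', byteswapping the address-family-specific fields that
 * need it (and fixing up too-short AF_UNIX sun_path lengths).
 */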
1647  static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1648                                                 abi_ulong target_addr,
1649                                                 socklen_t len)
1650  {
1651      const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1652      sa_family_t sa_family;
1653      struct target_sockaddr *target_saddr;
1654  
1655      if (fd_trans_target_to_host_addr(fd)) {
1656          return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1657      }
1658  
1659      target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1660      if (!target_saddr)
1661          return -TARGET_EFAULT;
1662  
1663      sa_family = tswap16(target_saddr->sa_family);
1664  
1665      /* Oops. The caller might send an incomplete sun_path; sun_path
1666       * must be terminated by \0 (see the manual page), but
1667       * unfortunately it is quite common to specify sockaddr_un
1668       * length as "strlen(x->sun_path)" while it should be
1669       * "strlen(...) + 1". We'll fix that here if needed.
1670       * The Linux kernel applies a similar fixup.
1671       */
1672  
1673      if (sa_family == AF_UNIX) {
1674          if (len < unix_maxlen && len > 0) {
1675              char *cp = (char*)target_saddr;
1676  
1677              if ( cp[len-1] && !cp[len] )
1678                  len++;
1679          }
1680          if (len > unix_maxlen)
1681              len = unix_maxlen;
1682      }
1683  
1684      memcpy(addr, target_saddr, len);
1685      addr->sa_family = sa_family;
1686      if (sa_family == AF_NETLINK) {
1687          struct sockaddr_nl *nladdr;
1688  
1689          nladdr = (struct sockaddr_nl *)addr;
1690          nladdr->nl_pid = tswap32(nladdr->nl_pid);
1691          nladdr->nl_groups = tswap32(nladdr->nl_groups);
1692      } else if (sa_family == AF_PACKET) {
1693  	struct target_sockaddr_ll *lladdr;
1694  
1695  	lladdr = (struct target_sockaddr_ll *)addr;
1696  	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1697  	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1698      } else if (sa_family == AF_INET6) {
1699          struct sockaddr_in6 *in6addr;
1700  
1701          in6addr = (struct sockaddr_in6 *)addr;
1702          in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1703      }
1704      unlock_user(target_saddr, target_addr, 0);
1705  
1706      return 0;
1707  }
1708  
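/*
 * Copy a host socket address into guest memory at target_addr,
 * byteswapping the address-family-specific fields that need it.
 */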
1709  static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1710                                                 struct sockaddr *addr,
1711                                                 socklen_t len)
1712  {
1713      struct target_sockaddr *target_saddr;
1714  
1715      if (len == 0) {
1716          return 0;
1717      }
1718      assert(addr);
1719  
1720      target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1721      if (!target_saddr)
1722          return -TARGET_EFAULT;
1723      memcpy(target_saddr, addr, len);
1724      if (len >= offsetof(struct target_sockaddr, sa_family) +
1725          sizeof(target_saddr->sa_family)) {
1726          target_saddr->sa_family = tswap16(addr->sa_family);
1727      }
1728      if (addr->sa_family == AF_NETLINK &&
1729          len >= sizeof(struct target_sockaddr_nl)) {
1730          struct target_sockaddr_nl *target_nl =
1731                 (struct target_sockaddr_nl *)target_saddr;
1732          target_nl->nl_pid = tswap32(target_nl->nl_pid);
1733          target_nl->nl_groups = tswap32(target_nl->nl_groups);
1734      } else if (addr->sa_family == AF_PACKET) {
1735          struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1736          target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1737          target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1738      } else if (addr->sa_family == AF_INET6 &&
1739                 len >= sizeof(struct target_sockaddr_in6)) {
1740          struct target_sockaddr_in6 *target_in6 =
1741                 (struct target_sockaddr_in6 *)target_saddr;
1742          target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1743      }
1744      unlock_user(target_saddr, target_addr, len);
1745  
1746      return 0;
1747  }
1748  
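/*
 * Convert the ancillary data (control messages) attached to a guest
 * msghdr into host format, converting the payload of the message
 * types we understand and copying the rest through unchanged.
 */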
1749  static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1750                                             struct target_msghdr *target_msgh)
1751  {
1752      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1753      abi_long msg_controllen;
1754      abi_ulong target_cmsg_addr;
1755      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1756      socklen_t space = 0;
1757  
1758      msg_controllen = tswapal(target_msgh->msg_controllen);
1759      if (msg_controllen < sizeof (struct target_cmsghdr))
1760          goto the_end;
1761      target_cmsg_addr = tswapal(target_msgh->msg_control);
1762      target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1763      target_cmsg_start = target_cmsg;
1764      if (!target_cmsg)
1765          return -TARGET_EFAULT;
1766  
1767      while (cmsg && target_cmsg) {
1768          void *data = CMSG_DATA(cmsg);
1769          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1770  
1771          int len = tswapal(target_cmsg->cmsg_len)
1772              - sizeof(struct target_cmsghdr);
1773  
1774          space += CMSG_SPACE(len);
1775          if (space > msgh->msg_controllen) {
1776              space -= CMSG_SPACE(len);
1777              /* This is a QEMU bug, since we allocated the payload
1778               * area ourselves (unlike overflow in host-to-target
1779               * conversion, which is just the guest giving us a buffer
1780               * that's too small). It can't happen for the payload types
1781               * we currently support; if it becomes an issue in future
1782               * we would need to improve our allocation strategy to
1783               * something more intelligent than "twice the size of the
1784               * target buffer we're reading from".
1785               */
1786              qemu_log_mask(LOG_UNIMP,
1787                            ("Unsupported ancillary data %d/%d: "
1788                             "unhandled msg size\n"),
1789                            tswap32(target_cmsg->cmsg_level),
1790                            tswap32(target_cmsg->cmsg_type));
1791              break;
1792          }
1793  
1794          if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1795              cmsg->cmsg_level = SOL_SOCKET;
1796          } else {
1797              cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1798          }
1799          cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1800          cmsg->cmsg_len = CMSG_LEN(len);
1801  
1802          if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1803              int *fd = (int *)data;
1804              int *target_fd = (int *)target_data;
1805              int i, numfds = len / sizeof(int);
1806  
1807              for (i = 0; i < numfds; i++) {
1808                  __get_user(fd[i], target_fd + i);
1809              }
1810          } else if (cmsg->cmsg_level == SOL_SOCKET
1811                 &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1812              struct ucred *cred = (struct ucred *)data;
1813              struct target_ucred *target_cred =
1814                  (struct target_ucred *)target_data;
1815  
1816              __get_user(cred->pid, &target_cred->pid);
1817              __get_user(cred->uid, &target_cred->uid);
1818              __get_user(cred->gid, &target_cred->gid);
1819          } else if (cmsg->cmsg_level == SOL_ALG) {
1820              uint32_t *dst = (uint32_t *)data;
1821  
1822              memcpy(dst, target_data, len);
1823              /* fix endianness of first 32-bit word */
1824              if (len >= sizeof(uint32_t)) {
1825                  *dst = tswap32(*dst);
1826              }
1827          } else {
1828              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1829                            cmsg->cmsg_level, cmsg->cmsg_type);
1830              memcpy(data, target_data, len);
1831          }
1832  
1833          cmsg = CMSG_NXTHDR(msgh, cmsg);
1834          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1835                                           target_cmsg_start);
1836      }
1837      unlock_user(target_cmsg, target_cmsg_addr, 0);
1838   the_end:
1839      msgh->msg_controllen = space;
1840      return 0;
1841  }
1842  
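/*
 * Convert host ancillary data back into the guest msghdr's control
 * buffer, truncating (and reporting MSG_CTRUNC) when the guest buffer
 * is too small.
 */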
1843  static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1844                                             struct msghdr *msgh)
1845  {
1846      struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1847      abi_long msg_controllen;
1848      abi_ulong target_cmsg_addr;
1849      struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1850      socklen_t space = 0;
1851  
1852      msg_controllen = tswapal(target_msgh->msg_controllen);
1853      if (msg_controllen < sizeof (struct target_cmsghdr))
1854          goto the_end;
1855      target_cmsg_addr = tswapal(target_msgh->msg_control);
1856      target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1857      target_cmsg_start = target_cmsg;
1858      if (!target_cmsg)
1859          return -TARGET_EFAULT;
1860  
1861      while (cmsg && target_cmsg) {
1862          void *data = CMSG_DATA(cmsg);
1863          void *target_data = TARGET_CMSG_DATA(target_cmsg);
1864  
1865          int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1866          int tgt_len, tgt_space;
1867  
1868          /* We never copy a half-header but may copy half-data;
1869           * this is Linux's behaviour in put_cmsg(). Note that
1870           * truncation here is a guest problem (which we report
1871           * to the guest via the CTRUNC bit), unlike truncation
1872           * in target_to_host_cmsg, which is a QEMU bug.
1873           */
1874          if (msg_controllen < sizeof(struct target_cmsghdr)) {
1875              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1876              break;
1877          }
1878  
1879          if (cmsg->cmsg_level == SOL_SOCKET) {
1880              target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1881          } else {
1882              target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1883          }
1884          target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1885  
1886          /* Payload types which need a different size of payload on
1887           * the target must adjust tgt_len here.
1888           */
1889          tgt_len = len;
1890          switch (cmsg->cmsg_level) {
1891          case SOL_SOCKET:
1892              switch (cmsg->cmsg_type) {
1893              case SO_TIMESTAMP:
1894                  tgt_len = sizeof(struct target_timeval);
1895                  break;
1896              default:
1897                  break;
1898              }
1899              break;
1900          default:
1901              break;
1902          }
1903  
1904          if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1905              target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1906              tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1907          }
1908  
1909          /* We must now copy-and-convert len bytes of payload
1910           * into tgt_len bytes of destination space. Bear in mind
1911           * that in both source and destination we may be dealing
1912           * with a truncated value!
1913           */
1914          switch (cmsg->cmsg_level) {
1915          case SOL_SOCKET:
1916              switch (cmsg->cmsg_type) {
1917              case SCM_RIGHTS:
1918              {
1919                  int *fd = (int *)data;
1920                  int *target_fd = (int *)target_data;
1921                  int i, numfds = tgt_len / sizeof(int);
1922  
1923                  for (i = 0; i < numfds; i++) {
1924                      __put_user(fd[i], target_fd + i);
1925                  }
1926                  break;
1927              }
1928              case SO_TIMESTAMP:
1929              {
1930                  struct timeval *tv = (struct timeval *)data;
1931                  struct target_timeval *target_tv =
1932                      (struct target_timeval *)target_data;
1933  
1934                  if (len != sizeof(struct timeval) ||
1935                      tgt_len != sizeof(struct target_timeval)) {
1936                      goto unimplemented;
1937                  }
1938  
1939                  /* copy struct timeval to target */
1940                  __put_user(tv->tv_sec, &target_tv->tv_sec);
1941                  __put_user(tv->tv_usec, &target_tv->tv_usec);
1942                  break;
1943              }
1944              case SCM_CREDENTIALS:
1945              {
1946                  struct ucred *cred = (struct ucred *)data;
1947                  struct target_ucred *target_cred =
1948                      (struct target_ucred *)target_data;
1949  
1950                  __put_user(cred->pid, &target_cred->pid);
1951                  __put_user(cred->uid, &target_cred->uid);
1952                  __put_user(cred->gid, &target_cred->gid);
1953                  break;
1954              }
1955              default:
1956                  goto unimplemented;
1957              }
1958              break;
1959  
1960          case SOL_IP:
1961              switch (cmsg->cmsg_type) {
1962              case IP_TTL:
1963              {
1964                  uint32_t *v = (uint32_t *)data;
1965                  uint32_t *t_int = (uint32_t *)target_data;
1966  
1967                  if (len != sizeof(uint32_t) ||
1968                      tgt_len != sizeof(uint32_t)) {
1969                      goto unimplemented;
1970                  }
1971                  __put_user(*v, t_int);
1972                  break;
1973              }
1974              case IP_RECVERR:
1975              {
1976                  struct errhdr_t {
1977                     struct sock_extended_err ee;
1978                     struct sockaddr_in offender;
1979                  };
1980                  struct errhdr_t *errh = (struct errhdr_t *)data;
1981                  struct errhdr_t *target_errh =
1982                      (struct errhdr_t *)target_data;
1983  
1984                  if (len != sizeof(struct errhdr_t) ||
1985                      tgt_len != sizeof(struct errhdr_t)) {
1986                      goto unimplemented;
1987                  }
1988                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1989                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1990                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1991                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1992                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1993                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1994                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1995                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
1996                      (void *) &errh->offender, sizeof(errh->offender));
1997                  break;
1998              }
1999              default:
2000                  goto unimplemented;
2001              }
2002              break;
2003  
2004          case SOL_IPV6:
2005              switch (cmsg->cmsg_type) {
2006              case IPV6_HOPLIMIT:
2007              {
2008                  uint32_t *v = (uint32_t *)data;
2009                  uint32_t *t_int = (uint32_t *)target_data;
2010  
2011                  if (len != sizeof(uint32_t) ||
2012                      tgt_len != sizeof(uint32_t)) {
2013                      goto unimplemented;
2014                  }
2015                  __put_user(*v, t_int);
2016                  break;
2017              }
2018              case IPV6_RECVERR:
2019              {
2020                  struct errhdr6_t {
2021                     struct sock_extended_err ee;
2022                     struct sockaddr_in6 offender;
2023                  };
2024                  struct errhdr6_t *errh = (struct errhdr6_t *)data;
2025                  struct errhdr6_t *target_errh =
2026                      (struct errhdr6_t *)target_data;
2027  
2028                  if (len != sizeof(struct errhdr6_t) ||
2029                      tgt_len != sizeof(struct errhdr6_t)) {
2030                      goto unimplemented;
2031                  }
2032                  __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2033                  __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2034                  __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2035                  __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2036                  __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2037                  __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2038                  __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2039                  host_to_target_sockaddr((unsigned long) &target_errh->offender,
2040                      (void *) &errh->offender, sizeof(errh->offender));
2041                  break;
2042              }
2043              default:
2044                  goto unimplemented;
2045              }
2046              break;
2047  
2048          default:
2049          unimplemented:
2050              qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2051                            cmsg->cmsg_level, cmsg->cmsg_type);
2052              memcpy(target_data, data, MIN(len, tgt_len));
2053              if (tgt_len > len) {
2054                  memset(target_data + len, 0, tgt_len - len);
2055              }
2056          }
2057  
2058          target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2059          tgt_space = TARGET_CMSG_SPACE(tgt_len);
2060          if (msg_controllen < tgt_space) {
2061              tgt_space = msg_controllen;
2062          }
2063          msg_controllen -= tgt_space;
2064          space += tgt_space;
2065          cmsg = CMSG_NXTHDR(msgh, cmsg);
2066          target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2067                                           target_cmsg_start);
2068      }
2069      unlock_user(target_cmsg, target_cmsg_addr, space);
2070   the_end:
2071      target_msgh->msg_controllen = tswapal(space);
2072      return 0;
2073  }
2074  
2075  /* do_setsockopt() Must return target values and target errnos. */
2076  static abi_long do_setsockopt(int sockfd, int level, int optname,
2077                                abi_ulong optval_addr, socklen_t optlen)
2078  {
2079      abi_long ret;
2080      int val;
2081  
2082      switch(level) {
2083      case SOL_TCP:
2084      case SOL_UDP:
2085          /* TCP and UDP options all take an 'int' value.  */
2086          if (optlen < sizeof(uint32_t))
2087              return -TARGET_EINVAL;
2088  
2089          if (get_user_u32(val, optval_addr))
2090              return -TARGET_EFAULT;
2091          ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2092          break;
2093      case SOL_IP:
2094          switch(optname) {
2095          case IP_TOS:
2096          case IP_TTL:
2097          case IP_HDRINCL:
2098          case IP_ROUTER_ALERT:
2099          case IP_RECVOPTS:
2100          case IP_RETOPTS:
2101          case IP_PKTINFO:
2102          case IP_MTU_DISCOVER:
2103          case IP_RECVERR:
2104          case IP_RECVTTL:
2105          case IP_RECVTOS:
2106  #ifdef IP_FREEBIND
2107          case IP_FREEBIND:
2108  #endif
2109          case IP_MULTICAST_TTL:
2110          case IP_MULTICAST_LOOP:
2111              val = 0;
2112              if (optlen >= sizeof(uint32_t)) {
2113                  if (get_user_u32(val, optval_addr))
2114                      return -TARGET_EFAULT;
2115              } else if (optlen >= 1) {
2116                  if (get_user_u8(val, optval_addr))
2117                      return -TARGET_EFAULT;
2118              }
2119              ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2120              break;
2121          case IP_ADD_MEMBERSHIP:
2122          case IP_DROP_MEMBERSHIP:
2123          {
2124              struct ip_mreqn ip_mreq;
2125              struct target_ip_mreqn *target_smreqn;
2126  
2127              QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2128                                sizeof(struct target_ip_mreq));
2129  
2130              if (optlen < sizeof (struct target_ip_mreq) ||
2131                  optlen > sizeof (struct target_ip_mreqn)) {
2132                  return -TARGET_EINVAL;
2133              }
2134  
2135              target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2136              if (!target_smreqn) {
2137                  return -TARGET_EFAULT;
2138              }
2139              ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2140              ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2141              if (optlen == sizeof(struct target_ip_mreqn)) {
2142                  ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2143                  optlen = sizeof(struct ip_mreqn);
2144              }
2145              unlock_user(target_smreqn, optval_addr, 0);
2146  
2147              ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2148              break;
2149          }
2150          case IP_BLOCK_SOURCE:
2151          case IP_UNBLOCK_SOURCE:
2152          case IP_ADD_SOURCE_MEMBERSHIP:
2153          case IP_DROP_SOURCE_MEMBERSHIP:
2154          {
2155              struct ip_mreq_source *ip_mreq_source;
2156  
2157              if (optlen != sizeof (struct target_ip_mreq_source))
2158                  return -TARGET_EINVAL;
2159  
2160              ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2161              if (!ip_mreq_source) {
2162                  return -TARGET_EFAULT;
2163              }
2164              ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2165              unlock_user (ip_mreq_source, optval_addr, 0);
2166              break;
2167          }
2168          default:
2169              goto unimplemented;
2170          }
2171          break;
2172      case SOL_IPV6:
2173          switch (optname) {
2174          case IPV6_MTU_DISCOVER:
2175          case IPV6_MTU:
2176          case IPV6_V6ONLY:
2177          case IPV6_RECVPKTINFO:
2178          case IPV6_UNICAST_HOPS:
2179          case IPV6_MULTICAST_HOPS:
2180          case IPV6_MULTICAST_LOOP:
2181          case IPV6_RECVERR:
2182          case IPV6_RECVHOPLIMIT:
2183          case IPV6_2292HOPLIMIT:
2184          case IPV6_CHECKSUM:
2185          case IPV6_ADDRFORM:
2186          case IPV6_2292PKTINFO:
2187          case IPV6_RECVTCLASS:
2188          case IPV6_RECVRTHDR:
2189          case IPV6_2292RTHDR:
2190          case IPV6_RECVHOPOPTS:
2191          case IPV6_2292HOPOPTS:
2192          case IPV6_RECVDSTOPTS:
2193          case IPV6_2292DSTOPTS:
2194          case IPV6_TCLASS:
2195          case IPV6_ADDR_PREFERENCES:
2196  #ifdef IPV6_RECVPATHMTU
2197          case IPV6_RECVPATHMTU:
2198  #endif
2199  #ifdef IPV6_TRANSPARENT
2200          case IPV6_TRANSPARENT:
2201  #endif
2202  #ifdef IPV6_FREEBIND
2203          case IPV6_FREEBIND:
2204  #endif
2205  #ifdef IPV6_RECVORIGDSTADDR
2206          case IPV6_RECVORIGDSTADDR:
2207  #endif
2208              val = 0;
2209              if (optlen < sizeof(uint32_t)) {
2210                  return -TARGET_EINVAL;
2211              }
2212              if (get_user_u32(val, optval_addr)) {
2213                  return -TARGET_EFAULT;
2214              }
2215              ret = get_errno(setsockopt(sockfd, level, optname,
2216                                         &val, sizeof(val)));
2217              break;
2218          case IPV6_PKTINFO:
2219          {
2220              struct in6_pktinfo pki;
2221  
2222              if (optlen < sizeof(pki)) {
2223                  return -TARGET_EINVAL;
2224              }
2225  
2226              if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2227                  return -TARGET_EFAULT;
2228              }
2229  
2230              pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2231  
2232              ret = get_errno(setsockopt(sockfd, level, optname,
2233                                         &pki, sizeof(pki)));
2234              break;
2235          }
2236          case IPV6_ADD_MEMBERSHIP:
2237          case IPV6_DROP_MEMBERSHIP:
2238          {
2239              struct ipv6_mreq ipv6mreq;
2240  
2241              if (optlen < sizeof(ipv6mreq)) {
2242                  return -TARGET_EINVAL;
2243              }
2244  
2245              if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2246                  return -TARGET_EFAULT;
2247              }
2248  
2249              ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2250  
2251              ret = get_errno(setsockopt(sockfd, level, optname,
2252                                         &ipv6mreq, sizeof(ipv6mreq)));
2253              break;
2254          }
2255          default:
2256              goto unimplemented;
2257          }
2258          break;
2259      case SOL_ICMPV6:
2260          switch (optname) {
2261          case ICMPV6_FILTER:
2262          {
2263              struct icmp6_filter icmp6f;
2264  
2265              if (optlen > sizeof(icmp6f)) {
2266                  optlen = sizeof(icmp6f);
2267              }
2268  
2269              if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2270                  return -TARGET_EFAULT;
2271              }
2272  
2273              for (val = 0; val < 8; val++) {
2274                  icmp6f.data[val] = tswap32(icmp6f.data[val]);
2275              }
2276  
2277              ret = get_errno(setsockopt(sockfd, level, optname,
2278                                         &icmp6f, optlen));
2279              break;
2280          }
2281          default:
2282              goto unimplemented;
2283          }
2284          break;
2285      case SOL_RAW:
2286          switch (optname) {
2287          case ICMP_FILTER:
2288          case IPV6_CHECKSUM:
2289              /* these take a u32 value */
2290              if (optlen < sizeof(uint32_t)) {
2291                  return -TARGET_EINVAL;
2292              }
2293  
2294              if (get_user_u32(val, optval_addr)) {
2295                  return -TARGET_EFAULT;
2296              }
2297              ret = get_errno(setsockopt(sockfd, level, optname,
2298                                         &val, sizeof(val)));
2299              break;
2300  
2301          default:
2302              goto unimplemented;
2303          }
2304          break;
2305  #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2306      case SOL_ALG:
2307          switch (optname) {
2308          case ALG_SET_KEY:
2309          {
2310              char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2311              if (!alg_key) {
2312                  return -TARGET_EFAULT;
2313              }
2314              ret = get_errno(setsockopt(sockfd, level, optname,
2315                                         alg_key, optlen));
2316              unlock_user(alg_key, optval_addr, optlen);
2317              break;
2318          }
2319          case ALG_SET_AEAD_AUTHSIZE:
2320          {
2321              ret = get_errno(setsockopt(sockfd, level, optname,
2322                                         NULL, optlen));
2323              break;
2324          }
2325          default:
2326              goto unimplemented;
2327          }
2328          break;
2329  #endif
2330      case TARGET_SOL_SOCKET:
2331          switch (optname) {
2332          case TARGET_SO_RCVTIMEO:
2333          case TARGET_SO_SNDTIMEO:
2334          {
2335                  struct timeval tv;
2336  
2337                  if (optlen != sizeof(struct target_timeval)) {
2338                      return -TARGET_EINVAL;
2339                  }
2340  
2341                  if (copy_from_user_timeval(&tv, optval_addr)) {
2342                      return -TARGET_EFAULT;
2343                  }
2344  
2345                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2346                                  optname == TARGET_SO_RCVTIMEO ?
2347                                      SO_RCVTIMEO : SO_SNDTIMEO,
2348                                  &tv, sizeof(tv)));
2349                  return ret;
2350          }
2351          case TARGET_SO_ATTACH_FILTER:
2352          {
2353                  struct target_sock_fprog *tfprog;
2354                  struct target_sock_filter *tfilter;
2355                  struct sock_fprog fprog;
2356                  struct sock_filter *filter;
2357                  int i;
2358  
2359                  if (optlen != sizeof(*tfprog)) {
2360                      return -TARGET_EINVAL;
2361                  }
2362                  if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2363                      return -TARGET_EFAULT;
2364                  }
2365                  if (!lock_user_struct(VERIFY_READ, tfilter,
2366                                        tswapal(tfprog->filter), 0)) {
2367                      unlock_user_struct(tfprog, optval_addr, 1);
2368                      return -TARGET_EFAULT;
2369                  }
2370  
2371                  fprog.len = tswap16(tfprog->len);
2372                  filter = g_try_new(struct sock_filter, fprog.len);
2373                  if (filter == NULL) {
2374                      unlock_user_struct(tfilter, tfprog->filter, 1);
2375                      unlock_user_struct(tfprog, optval_addr, 1);
2376                      return -TARGET_ENOMEM;
2377                  }
2378                  for (i = 0; i < fprog.len; i++) {
2379                      filter[i].code = tswap16(tfilter[i].code);
2380                      filter[i].jt = tfilter[i].jt;
2381                      filter[i].jf = tfilter[i].jf;
2382                      filter[i].k = tswap32(tfilter[i].k);
2383                  }
2384                  fprog.filter = filter;
2385  
2386                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2387                                  SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2388                  g_free(filter);
2389  
2390                  unlock_user_struct(tfilter, tfprog->filter, 1);
2391                  unlock_user_struct(tfprog, optval_addr, 1);
2392                  return ret;
2393          }
2394  	case TARGET_SO_BINDTODEVICE:
2395  	{
2396  		char *dev_ifname, *addr_ifname;
2397  
2398  		if (optlen > IFNAMSIZ - 1) {
2399  		    optlen = IFNAMSIZ - 1;
2400  		}
2401  		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2402  		if (!dev_ifname) {
2403  		    return -TARGET_EFAULT;
2404  		}
2405  		optname = SO_BINDTODEVICE;
2406  		addr_ifname = alloca(IFNAMSIZ);
2407  		memcpy(addr_ifname, dev_ifname, optlen);
2408  		addr_ifname[optlen] = 0;
2409  		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2410                                             addr_ifname, optlen));
2411  		unlock_user (dev_ifname, optval_addr, 0);
2412  		return ret;
2413  	}
2414          case TARGET_SO_LINGER:
2415          {
2416                  struct linger lg;
2417                  struct target_linger *tlg;
2418  
2419                  if (optlen != sizeof(struct target_linger)) {
2420                      return -TARGET_EINVAL;
2421                  }
2422                  if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2423                      return -TARGET_EFAULT;
2424                  }
2425                  __get_user(lg.l_onoff, &tlg->l_onoff);
2426                  __get_user(lg.l_linger, &tlg->l_linger);
2427                  ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2428                                  &lg, sizeof(lg)));
2429                  unlock_user_struct(tlg, optval_addr, 0);
2430                  return ret;
2431          }
2432              /* Options with 'int' argument.  */
2433          case TARGET_SO_DEBUG:
2434  		optname = SO_DEBUG;
2435  		break;
2436          case TARGET_SO_REUSEADDR:
2437  		optname = SO_REUSEADDR;
2438  		break;
2439  #ifdef SO_REUSEPORT
2440          case TARGET_SO_REUSEPORT:
2441                  optname = SO_REUSEPORT;
2442                  break;
2443  #endif
2444          case TARGET_SO_TYPE:
2445  		optname = SO_TYPE;
2446  		break;
2447          case TARGET_SO_ERROR:
2448  		optname = SO_ERROR;
2449  		break;
2450          case TARGET_SO_DONTROUTE:
2451  		optname = SO_DONTROUTE;
2452  		break;
2453          case TARGET_SO_BROADCAST:
2454  		optname = SO_BROADCAST;
2455  		break;
2456          case TARGET_SO_SNDBUF:
2457  		optname = SO_SNDBUF;
2458  		break;
2459          case TARGET_SO_SNDBUFFORCE:
2460                  optname = SO_SNDBUFFORCE;
2461                  break;
2462          case TARGET_SO_RCVBUF:
2463  		optname = SO_RCVBUF;
2464  		break;
2465          case TARGET_SO_RCVBUFFORCE:
2466                  optname = SO_RCVBUFFORCE;
2467                  break;
2468          case TARGET_SO_KEEPALIVE:
2469  		optname = SO_KEEPALIVE;
2470  		break;
2471          case TARGET_SO_OOBINLINE:
2472  		optname = SO_OOBINLINE;
2473  		break;
2474          case TARGET_SO_NO_CHECK:
2475  		optname = SO_NO_CHECK;
2476  		break;
2477          case TARGET_SO_PRIORITY:
2478  		optname = SO_PRIORITY;
2479  		break;
2480  #ifdef SO_BSDCOMPAT
2481          case TARGET_SO_BSDCOMPAT:
2482  		optname = SO_BSDCOMPAT;
2483  		break;
2484  #endif
2485          case TARGET_SO_PASSCRED:
2486  		optname = SO_PASSCRED;
2487  		break;
2488          case TARGET_SO_PASSSEC:
2489                  optname = SO_PASSSEC;
2490                  break;
2491          case TARGET_SO_TIMESTAMP:
2492  		optname = SO_TIMESTAMP;
2493  		break;
2494          case TARGET_SO_RCVLOWAT:
2495  		optname = SO_RCVLOWAT;
2496  		break;
2497          default:
2498              goto unimplemented;
2499          }
2500  	if (optlen < sizeof(uint32_t))
2501              return -TARGET_EINVAL;
2502  
2503  	if (get_user_u32(val, optval_addr))
2504              return -TARGET_EFAULT;
2505  	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2506          break;
2507  #ifdef SOL_NETLINK
2508      case SOL_NETLINK:
2509          switch (optname) {
2510          case NETLINK_PKTINFO:
2511          case NETLINK_ADD_MEMBERSHIP:
2512          case NETLINK_DROP_MEMBERSHIP:
2513          case NETLINK_BROADCAST_ERROR:
2514          case NETLINK_NO_ENOBUFS:
2515  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2516          case NETLINK_LISTEN_ALL_NSID:
2517          case NETLINK_CAP_ACK:
2518  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2519  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2520          case NETLINK_EXT_ACK:
2521  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2522  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2523          case NETLINK_GET_STRICT_CHK:
2524  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2525              break;
2526          default:
2527              goto unimplemented;
2528          }
2529          val = 0;
2530          if (optlen < sizeof(uint32_t)) {
2531              return -TARGET_EINVAL;
2532          }
2533          if (get_user_u32(val, optval_addr)) {
2534              return -TARGET_EFAULT;
2535          }
2536          ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2537                                     sizeof(val)));
2538          break;
2539  #endif /* SOL_NETLINK */
2540      default:
2541      unimplemented:
2542          qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2543                        level, optname);
2544          ret = -TARGET_ENOPROTOOPT;
2545      }
2546      return ret;
2547  }
2548  
2549  /* do_getsockopt() Must return target values and target errnos. */
2550  static abi_long do_getsockopt(int sockfd, int level, int optname,
2551                                abi_ulong optval_addr, abi_ulong optlen)
2552  {
2553      abi_long ret;
2554      int len, val;
2555      socklen_t lv;
2556  
2557      switch(level) {
2558      case TARGET_SOL_SOCKET:
2559          level = SOL_SOCKET;
2560          switch (optname) {
2561          /* These don't just return a single integer */
2562          case TARGET_SO_PEERNAME:
2563              goto unimplemented;
2564          case TARGET_SO_RCVTIMEO: {
2565              struct timeval tv;
2566              socklen_t tvlen;
2567  
2568              optname = SO_RCVTIMEO;
2569  
2570  get_timeout:
2571              if (get_user_u32(len, optlen)) {
2572                  return -TARGET_EFAULT;
2573              }
2574              if (len < 0) {
2575                  return -TARGET_EINVAL;
2576              }
2577  
2578              tvlen = sizeof(tv);
2579              ret = get_errno(getsockopt(sockfd, level, optname,
2580                                         &tv, &tvlen));
2581              if (ret < 0) {
2582                  return ret;
2583              }
2584              if (len > sizeof(struct target_timeval)) {
2585                  len = sizeof(struct target_timeval);
2586              }
2587              if (copy_to_user_timeval(optval_addr, &tv)) {
2588                  return -TARGET_EFAULT;
2589              }
2590              if (put_user_u32(len, optlen)) {
2591                  return -TARGET_EFAULT;
2592              }
2593              break;
2594          }
2595          case TARGET_SO_SNDTIMEO:
2596              optname = SO_SNDTIMEO;
2597              goto get_timeout;
2598          case TARGET_SO_PEERCRED: {
2599              struct ucred cr;
2600              socklen_t crlen;
2601              struct target_ucred *tcr;
2602  
2603              if (get_user_u32(len, optlen)) {
2604                  return -TARGET_EFAULT;
2605              }
2606              if (len < 0) {
2607                  return -TARGET_EINVAL;
2608              }
2609  
2610              crlen = sizeof(cr);
2611              ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2612                                         &cr, &crlen));
2613              if (ret < 0) {
2614                  return ret;
2615              }
2616              if (len > crlen) {
2617                  len = crlen;
2618              }
2619              if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2620                  return -TARGET_EFAULT;
2621              }
2622              __put_user(cr.pid, &tcr->pid);
2623              __put_user(cr.uid, &tcr->uid);
2624              __put_user(cr.gid, &tcr->gid);
2625              unlock_user_struct(tcr, optval_addr, 1);
2626              if (put_user_u32(len, optlen)) {
2627                  return -TARGET_EFAULT;
2628              }
2629              break;
2630          }
2631          case TARGET_SO_PEERSEC: {
2632              char *name;
2633  
2634              if (get_user_u32(len, optlen)) {
2635                  return -TARGET_EFAULT;
2636              }
2637              if (len < 0) {
2638                  return -TARGET_EINVAL;
2639              }
2640              name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2641              if (!name) {
2642                  return -TARGET_EFAULT;
2643              }
2644              lv = len;
2645              ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2646                                         name, &lv));
2647              if (put_user_u32(lv, optlen)) {
2648                  ret = -TARGET_EFAULT;
2649              }
2650              unlock_user(name, optval_addr, lv);
2651              break;
2652          }
2653          case TARGET_SO_LINGER:
2654          {
2655              struct linger lg;
2656              socklen_t lglen;
2657              struct target_linger *tlg;
2658  
2659              if (get_user_u32(len, optlen)) {
2660                  return -TARGET_EFAULT;
2661              }
2662              if (len < 0) {
2663                  return -TARGET_EINVAL;
2664              }
2665  
2666              lglen = sizeof(lg);
2667              ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2668                                         &lg, &lglen));
2669              if (ret < 0) {
2670                  return ret;
2671              }
2672              if (len > lglen) {
2673                  len = lglen;
2674              }
2675              if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2676                  return -TARGET_EFAULT;
2677              }
2678              __put_user(lg.l_onoff, &tlg->l_onoff);
2679              __put_user(lg.l_linger, &tlg->l_linger);
2680              unlock_user_struct(tlg, optval_addr, 1);
2681              if (put_user_u32(len, optlen)) {
2682                  return -TARGET_EFAULT;
2683              }
2684              break;
2685          }
2686          /* Options with 'int' argument.  */
2687          case TARGET_SO_DEBUG:
2688              optname = SO_DEBUG;
2689              goto int_case;
2690          case TARGET_SO_REUSEADDR:
2691              optname = SO_REUSEADDR;
2692              goto int_case;
2693  #ifdef SO_REUSEPORT
2694          case TARGET_SO_REUSEPORT:
2695              optname = SO_REUSEPORT;
2696              goto int_case;
2697  #endif
2698          case TARGET_SO_TYPE:
2699              optname = SO_TYPE;
2700              goto int_case;
2701          case TARGET_SO_ERROR:
2702              optname = SO_ERROR;
2703              goto int_case;
2704          case TARGET_SO_DONTROUTE:
2705              optname = SO_DONTROUTE;
2706              goto int_case;
2707          case TARGET_SO_BROADCAST:
2708              optname = SO_BROADCAST;
2709              goto int_case;
2710          case TARGET_SO_SNDBUF:
2711              optname = SO_SNDBUF;
2712              goto int_case;
2713          case TARGET_SO_RCVBUF:
2714              optname = SO_RCVBUF;
2715              goto int_case;
2716          case TARGET_SO_KEEPALIVE:
2717              optname = SO_KEEPALIVE;
2718              goto int_case;
2719          case TARGET_SO_OOBINLINE:
2720              optname = SO_OOBINLINE;
2721              goto int_case;
2722          case TARGET_SO_NO_CHECK:
2723              optname = SO_NO_CHECK;
2724              goto int_case;
2725          case TARGET_SO_PRIORITY:
2726              optname = SO_PRIORITY;
2727              goto int_case;
2728  #ifdef SO_BSDCOMPAT
2729          case TARGET_SO_BSDCOMPAT:
2730              optname = SO_BSDCOMPAT;
2731              goto int_case;
2732  #endif
2733          case TARGET_SO_PASSCRED:
2734              optname = SO_PASSCRED;
2735              goto int_case;
2736          case TARGET_SO_TIMESTAMP:
2737              optname = SO_TIMESTAMP;
2738              goto int_case;
2739          case TARGET_SO_RCVLOWAT:
2740              optname = SO_RCVLOWAT;
2741              goto int_case;
2742          case TARGET_SO_ACCEPTCONN:
2743              optname = SO_ACCEPTCONN;
2744              goto int_case;
2745          case TARGET_SO_PROTOCOL:
2746              optname = SO_PROTOCOL;
2747              goto int_case;
2748          case TARGET_SO_DOMAIN:
2749              optname = SO_DOMAIN;
2750              goto int_case;
2751          default:
2752              goto int_case;
2753          }
2754          break;
2755      case SOL_TCP:
2756      case SOL_UDP:
2757          /* TCP and UDP options all take an 'int' value.  */
2758      int_case:
2759          if (get_user_u32(len, optlen))
2760              return -TARGET_EFAULT;
2761          if (len < 0)
2762              return -TARGET_EINVAL;
2763          lv = sizeof(lv);
2764          ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2765          if (ret < 0)
2766              return ret;
2767          switch (optname) {
2768          case SO_TYPE:
2769              val = host_to_target_sock_type(val);
2770              break;
2771          case SO_ERROR:
2772              val = host_to_target_errno(val);
2773              break;
2774          }
2775          if (len > lv)
2776              len = lv;
2777          if (len == 4) {
2778              if (put_user_u32(val, optval_addr))
2779                  return -TARGET_EFAULT;
2780          } else {
2781              if (put_user_u8(val, optval_addr))
2782                  return -TARGET_EFAULT;
2783          }
2784          if (put_user_u32(len, optlen))
2785              return -TARGET_EFAULT;
2786          break;
2787      case SOL_IP:
2788          switch(optname) {
2789          case IP_TOS:
2790          case IP_TTL:
2791          case IP_HDRINCL:
2792          case IP_ROUTER_ALERT:
2793          case IP_RECVOPTS:
2794          case IP_RETOPTS:
2795          case IP_PKTINFO:
2796          case IP_MTU_DISCOVER:
2797          case IP_RECVERR:
2798          case IP_RECVTOS:
2799  #ifdef IP_FREEBIND
2800          case IP_FREEBIND:
2801  #endif
2802          case IP_MULTICAST_TTL:
2803          case IP_MULTICAST_LOOP:
2804              if (get_user_u32(len, optlen))
2805                  return -TARGET_EFAULT;
2806              if (len < 0)
2807                  return -TARGET_EINVAL;
2808              lv = sizeof(lv);
2809              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2810              if (ret < 0)
2811                  return ret;
2812              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2813                  len = 1;
2814                  if (put_user_u32(len, optlen)
2815                      || put_user_u8(val, optval_addr))
2816                      return -TARGET_EFAULT;
2817              } else {
2818                  if (len > sizeof(int))
2819                      len = sizeof(int);
2820                  if (put_user_u32(len, optlen)
2821                      || put_user_u32(val, optval_addr))
2822                      return -TARGET_EFAULT;
2823              }
2824              break;
2825          default:
2826              ret = -TARGET_ENOPROTOOPT;
2827              break;
2828          }
2829          break;
2830      case SOL_IPV6:
2831          switch (optname) {
2832          case IPV6_MTU_DISCOVER:
2833          case IPV6_MTU:
2834          case IPV6_V6ONLY:
2835          case IPV6_RECVPKTINFO:
2836          case IPV6_UNICAST_HOPS:
2837          case IPV6_MULTICAST_HOPS:
2838          case IPV6_MULTICAST_LOOP:
2839          case IPV6_RECVERR:
2840          case IPV6_RECVHOPLIMIT:
2841          case IPV6_2292HOPLIMIT:
2842          case IPV6_CHECKSUM:
2843          case IPV6_ADDRFORM:
2844          case IPV6_2292PKTINFO:
2845          case IPV6_RECVTCLASS:
2846          case IPV6_RECVRTHDR:
2847          case IPV6_2292RTHDR:
2848          case IPV6_RECVHOPOPTS:
2849          case IPV6_2292HOPOPTS:
2850          case IPV6_RECVDSTOPTS:
2851          case IPV6_2292DSTOPTS:
2852          case IPV6_TCLASS:
2853          case IPV6_ADDR_PREFERENCES:
2854  #ifdef IPV6_RECVPATHMTU
2855          case IPV6_RECVPATHMTU:
2856  #endif
2857  #ifdef IPV6_TRANSPARENT
2858          case IPV6_TRANSPARENT:
2859  #endif
2860  #ifdef IPV6_FREEBIND
2861          case IPV6_FREEBIND:
2862  #endif
2863  #ifdef IPV6_RECVORIGDSTADDR
2864          case IPV6_RECVORIGDSTADDR:
2865  #endif
2866              if (get_user_u32(len, optlen))
2867                  return -TARGET_EFAULT;
2868              if (len < 0)
2869                  return -TARGET_EINVAL;
2870              lv = sizeof(lv);
2871              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2872              if (ret < 0)
2873                  return ret;
2874              if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2875                  len = 1;
2876                  if (put_user_u32(len, optlen)
2877                      || put_user_u8(val, optval_addr))
2878                      return -TARGET_EFAULT;
2879              } else {
2880                  if (len > sizeof(int))
2881                      len = sizeof(int);
2882                  if (put_user_u32(len, optlen)
2883                      || put_user_u32(val, optval_addr))
2884                      return -TARGET_EFAULT;
2885              }
2886              break;
2887          default:
2888              ret = -TARGET_ENOPROTOOPT;
2889              break;
2890          }
2891          break;
2892  #ifdef SOL_NETLINK
2893      case SOL_NETLINK:
2894          switch (optname) {
2895          case NETLINK_PKTINFO:
2896          case NETLINK_BROADCAST_ERROR:
2897          case NETLINK_NO_ENOBUFS:
2898  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899          case NETLINK_LISTEN_ALL_NSID:
2900          case NETLINK_CAP_ACK:
2901  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2902  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2903          case NETLINK_EXT_ACK:
2904  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2905  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2906          case NETLINK_GET_STRICT_CHK:
2907  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2908              if (get_user_u32(len, optlen)) {
2909                  return -TARGET_EFAULT;
2910              }
2911              if (len != sizeof(val)) {
2912                  return -TARGET_EINVAL;
2913              }
2914              lv = len;
2915              ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2916              if (ret < 0) {
2917                  return ret;
2918              }
2919              if (put_user_u32(lv, optlen)
2920                  || put_user_u32(val, optval_addr)) {
2921                  return -TARGET_EFAULT;
2922              }
2923              break;
2924  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2925          case NETLINK_LIST_MEMBERSHIPS:
2926          {
2927              uint32_t *results;
2928              int i;
2929              if (get_user_u32(len, optlen)) {
2930                  return -TARGET_EFAULT;
2931              }
2932              if (len < 0) {
2933                  return -TARGET_EINVAL;
2934              }
2935              results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2936              if (!results && len > 0) {
2937                  return -TARGET_EFAULT;
2938              }
2939              lv = len;
2940              ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2941              if (ret < 0) {
2942                  unlock_user(results, optval_addr, 0);
2943                  return ret;
2944              }
2945              /* swap host endianness to target endianness. */
2946              for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2947                  results[i] = tswap32(results[i]);
2948              }
2949              if (put_user_u32(lv, optlen)) {
                      /* don't leak the locked results buffer on this error path */
                      unlock_user(results, optval_addr, 0);
2950                  return -TARGET_EFAULT;
2951              }
2952              unlock_user(results, optval_addr, 0);
2953              break;
2954          }
2955  #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2956          default:
2957              goto unimplemented;
2958          }
2959          break;
2960  #endif /* SOL_NETLINK */
2961      default:
2962      unimplemented:
2963          qemu_log_mask(LOG_UNIMP,
2964                        "getsockopt level=%d optname=%d not yet supported\n",
2965                        level, optname);
2966          ret = -TARGET_EOPNOTSUPP;
2967          break;
2968      }
2969      return ret;
2970  }
2971  
2972  /* Convert target low/high pair representing file offset into the host
2973   * low/high pair. This function doesn't handle offsets bigger than 64 bits
2974   * as the kernel doesn't handle them either.
2975   */
2976  static void target_to_host_low_high(abi_ulong tlow,
2977                                      abi_ulong thigh,
2978                                      unsigned long *hlow,
2979                                      unsigned long *hhigh)
2980  {
2981      uint64_t off = tlow |
2982          ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2983          TARGET_LONG_BITS / 2;
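      /*
       * The shift above (and the one for *hhigh below) is done in two
       * half-width steps: a single shift by the full TARGET_LONG_BITS or
       * HOST_LONG_BITS would be undefined behaviour in C when the shift
       * count equals the operand width, and the two-step form simply
       * discards the bits that cannot be represented (offsets are limited
       * to 64 bits, as noted in the comment above the function).
       */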
2984  
2985      *hlow = off;
2986      *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2987  }
2988  
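/*
 * Build a host struct iovec array from a guest iovec array, locking each
 * guest buffer into host memory with lock_user().  On error, NULL is
 * returned and errno is set; a zero count also returns NULL but with errno
 * cleared to 0.  The result must be released with unlock_iovec().
 */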
2989  static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2990                                  abi_ulong count, int copy)
2991  {
2992      struct target_iovec *target_vec;
2993      struct iovec *vec;
2994      abi_ulong total_len, max_len;
2995      int i;
2996      int err = 0;
2997      bool bad_address = false;
2998  
2999      if (count == 0) {
3000          errno = 0;
3001          return NULL;
3002      }
3003      if (count > IOV_MAX) {
3004          errno = EINVAL;
3005          return NULL;
3006      }
3007  
3008      vec = g_try_new0(struct iovec, count);
3009      if (vec == NULL) {
3010          errno = ENOMEM;
3011          return NULL;
3012      }
3013  
3014      target_vec = lock_user(VERIFY_READ, target_addr,
3015                             count * sizeof(struct target_iovec), 1);
3016      if (target_vec == NULL) {
3017          err = EFAULT;
3018          goto fail2;
3019      }
3020  
3021      /* ??? If host page size > target page size, this will result in a
3022         value larger than what we can actually support.  */
3023      max_len = 0x7fffffff & TARGET_PAGE_MASK;
3024      total_len = 0;
3025  
3026      for (i = 0; i < count; i++) {
3027          abi_ulong base = tswapal(target_vec[i].iov_base);
3028          abi_long len = tswapal(target_vec[i].iov_len);
3029  
3030          if (len < 0) {
3031              err = EINVAL;
3032              goto fail;
3033          } else if (len == 0) {
3034              /* Zero length pointer is ignored.  */
3035              vec[i].iov_base = 0;
3036          } else {
3037              vec[i].iov_base = lock_user(type, base, len, copy);
3038              /* If the first buffer pointer is bad, this is a fault.  But
3039               * subsequent bad buffers will result in a partial write; this
3040               * is realized by filling the vector with null pointers and
3041               * zero lengths. */
3042              if (!vec[i].iov_base) {
3043                  if (i == 0) {
3044                      err = EFAULT;
3045                      goto fail;
3046                  } else {
3047                      bad_address = true;
3048                  }
3049              }
3050              if (bad_address) {
3051                  len = 0;
3052              }
3053              if (len > max_len - total_len) {
3054                  len = max_len - total_len;
3055              }
3056          }
3057          vec[i].iov_len = len;
3058          total_len += len;
3059      }
3060  
3061      unlock_user(target_vec, target_addr, 0);
3062      return vec;
3063  
3064   fail:
3065      while (--i >= 0) {
3066          if (tswapal(target_vec[i].iov_len) > 0) {
3067              unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3068          }
3069      }
3070      unlock_user(target_vec, target_addr, 0);
3071   fail2:
3072      g_free(vec);
3073      errno = err;
3074      return NULL;
3075  }
3076  
3077  static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3078                           abi_ulong count, int copy)
3079  {
3080      struct target_iovec *target_vec;
3081      int i;
3082  
3083      target_vec = lock_user(VERIFY_READ, target_addr,
3084                             count * sizeof(struct target_iovec), 1);
3085      if (target_vec) {
3086          for (i = 0; i < count; i++) {
3087              abi_ulong base = tswapal(target_vec[i].iov_base);
3088              abi_long len = tswapal(target_vec[i].iov_len);
3089              if (len < 0) {
3090                  break;
3091              }
3092              unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3093          }
3094          unlock_user(target_vec, target_addr, 0);
3095      }
3096  
3097      g_free(vec);
3098  }
3099  
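/*
 * Translate the target's socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK
 * flag bits into host values.  A flag the host can neither express nor
 * emulate afterwards (see sock_flags_fixup()) makes this fail with
 * -TARGET_EINVAL.
 */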
3100  static inline int target_to_host_sock_type(int *type)
3101  {
3102      int host_type = 0;
3103      int target_type = *type;
3104  
3105      switch (target_type & TARGET_SOCK_TYPE_MASK) {
3106      case TARGET_SOCK_DGRAM:
3107          host_type = SOCK_DGRAM;
3108          break;
3109      case TARGET_SOCK_STREAM:
3110          host_type = SOCK_STREAM;
3111          break;
3112      default:
3113          host_type = target_type & TARGET_SOCK_TYPE_MASK;
3114          break;
3115      }
3116      if (target_type & TARGET_SOCK_CLOEXEC) {
3117  #if defined(SOCK_CLOEXEC)
3118          host_type |= SOCK_CLOEXEC;
3119  #else
3120          return -TARGET_EINVAL;
3121  #endif
3122      }
3123      if (target_type & TARGET_SOCK_NONBLOCK) {
3124  #if defined(SOCK_NONBLOCK)
3125          host_type |= SOCK_NONBLOCK;
3126  #elif !defined(O_NONBLOCK)
3127          return -TARGET_EINVAL;
3128  #endif
3129      }
3130      *type = host_type;
3131      return 0;
3132  }
3133  
3134  /* Try to emulate socket type flags after socket creation.  */
3135  static int sock_flags_fixup(int fd, int target_type)
3136  {
3137  #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3138      if (target_type & TARGET_SOCK_NONBLOCK) {
3139          int flags = fcntl(fd, F_GETFL);
3140          if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3141              close(fd);
3142              return -TARGET_EINVAL;
3143          }
3144      }
3145  #endif
3146      return fd;
3147  }
3148  
3149  /* do_socket() Must return target values and target errnos. */
3150  static abi_long do_socket(int domain, int type, int protocol)
3151  {
3152      int target_type = type;
3153      int ret;
3154  
3155      ret = target_to_host_sock_type(&type);
3156      if (ret) {
3157          return ret;
3158      }
3159  
3160      if (domain == PF_NETLINK && !(
3161  #ifdef CONFIG_RTNETLINK
3162           protocol == NETLINK_ROUTE ||
3163  #endif
3164           protocol == NETLINK_KOBJECT_UEVENT ||
3165           protocol == NETLINK_AUDIT)) {
3166          return -TARGET_EPROTONOSUPPORT;
3167      }
3168  
3169      if (domain == AF_PACKET ||
3170          (domain == AF_INET && type == SOCK_PACKET)) {
3171          protocol = tswap16(protocol);
3172      }
3173  
3174      ret = get_errno(socket(domain, type, protocol));
3175      if (ret >= 0) {
3176          ret = sock_flags_fixup(ret, target_type);
3177          if (type == SOCK_PACKET) {
3178              /* Handle an obsolete case:
3179               * if the socket type is SOCK_PACKET, bind by name
3180               */
3181              fd_trans_register(ret, &target_packet_trans);
3182          } else if (domain == PF_NETLINK) {
3183              switch (protocol) {
3184  #ifdef CONFIG_RTNETLINK
3185              case NETLINK_ROUTE:
3186                  fd_trans_register(ret, &target_netlink_route_trans);
3187                  break;
3188  #endif
3189              case NETLINK_KOBJECT_UEVENT:
3190                  /* nothing to do: messages are strings */
3191                  break;
3192              case NETLINK_AUDIT:
3193                  fd_trans_register(ret, &target_netlink_audit_trans);
3194                  break;
3195              default:
3196                  g_assert_not_reached();
3197              }
3198          }
3199      }
3200      return ret;
3201  }
3202  
3203  /* do_bind() Must return target values and target errnos. */
3204  static abi_long do_bind(int sockfd, abi_ulong target_addr,
3205                          socklen_t addrlen)
3206  {
3207      void *addr;
3208      abi_long ret;
3209  
3210      if ((int)addrlen < 0) {
3211          return -TARGET_EINVAL;
3212      }
3213  
3214      addr = alloca(addrlen+1);
3215  
3216      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3217      if (ret)
3218          return ret;
3219  
3220      return get_errno(bind(sockfd, addr, addrlen));
3221  }
3222  
3223  /* do_connect() Must return target values and target errnos. */
3224  static abi_long do_connect(int sockfd, abi_ulong target_addr,
3225                             socklen_t addrlen)
3226  {
3227      void *addr;
3228      abi_long ret;
3229  
3230      if ((int)addrlen < 0) {
3231          return -TARGET_EINVAL;
3232      }
3233  
3234      addr = alloca(addrlen+1);
3235  
3236      ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3237      if (ret)
3238          return ret;
3239  
3240      return get_errno(safe_connect(sockfd, addr, addrlen));
3241  }
3242  
3243  /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3244  static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3245                                        int flags, int send)
3246  {
3247      abi_long ret, len;
3248      struct msghdr msg;
3249      abi_ulong count;
3250      struct iovec *vec;
3251      abi_ulong target_vec;
3252  
3253      if (msgp->msg_name) {
3254          msg.msg_namelen = tswap32(msgp->msg_namelen);
3255          msg.msg_name = alloca(msg.msg_namelen+1);
3256          ret = target_to_host_sockaddr(fd, msg.msg_name,
3257                                        tswapal(msgp->msg_name),
3258                                        msg.msg_namelen);
3259          if (ret == -TARGET_EFAULT) {
3260              /* For connected sockets msg_name and msg_namelen must
3261               * be ignored, so returning EFAULT immediately is wrong.
3262               * Instead, pass a bad msg_name to the host kernel, and
3263               * let it decide whether to return EFAULT or not.
3264               */
3265              msg.msg_name = (void *)-1;
3266          } else if (ret) {
3267              goto out2;
3268          }
3269      } else {
3270          msg.msg_name = NULL;
3271          msg.msg_namelen = 0;
3272      }
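      /*
       * The host control-message buffer is sized at twice the target's
       * msg_controllen; presumably this leaves headroom for host cmsg
       * headers and alignment that can be larger than the target's layout.
       */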
3273      msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3274      msg.msg_control = alloca(msg.msg_controllen);
3275      memset(msg.msg_control, 0, msg.msg_controllen);
3276  
3277      msg.msg_flags = tswap32(msgp->msg_flags);
3278  
3279      count = tswapal(msgp->msg_iovlen);
3280      target_vec = tswapal(msgp->msg_iov);
3281  
3282      if (count > IOV_MAX) {
3283          /* sendmsg/recvmsg return a different errno for this condition than
3284           * readv/writev, so we must catch it here before lock_iovec() does.
3285           */
3286          ret = -TARGET_EMSGSIZE;
3287          goto out2;
3288      }
3289  
3290      vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3291                       target_vec, count, send);
3292      if (vec == NULL) {
3293          ret = -host_to_target_errno(errno);
3294          /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3295          if (!send || ret) {
3296              goto out2;
3297          }
3298      }
3299      msg.msg_iovlen = count;
3300      msg.msg_iov = vec;
3301  
3302      if (send) {
3303          if (fd_trans_target_to_host_data(fd)) {
3304              void *host_msg;
3305  
3306              host_msg = g_malloc(msg.msg_iov->iov_len);
3307              memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3308              ret = fd_trans_target_to_host_data(fd)(host_msg,
3309                                                     msg.msg_iov->iov_len);
3310              if (ret >= 0) {
3311                  msg.msg_iov->iov_base = host_msg;
3312                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3313              }
3314              g_free(host_msg);
3315          } else {
3316              ret = target_to_host_cmsg(&msg, msgp);
3317              if (ret == 0) {
3318                  ret = get_errno(safe_sendmsg(fd, &msg, flags));
3319              }
3320          }
3321      } else {
3322          ret = get_errno(safe_recvmsg(fd, &msg, flags));
3323          if (!is_error(ret)) {
3324              len = ret;
3325              if (fd_trans_host_to_target_data(fd)) {
3326                  ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3327                                                 MIN(msg.msg_iov->iov_len, len));
3328              }
3329              if (!is_error(ret)) {
3330                  ret = host_to_target_cmsg(msgp, &msg);
3331              }
3332              if (!is_error(ret)) {
3333                  msgp->msg_namelen = tswap32(msg.msg_namelen);
3334                  msgp->msg_flags = tswap32(msg.msg_flags);
3335                  if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3336                      ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3337                                      msg.msg_name, msg.msg_namelen);
3338                      if (ret) {
3339                          goto out;
3340                      }
3341                  }
3342  
3343                  ret = len;
3344              }
3345          }
3346      }
3347  
3348  out:
3349      if (vec) {
3350          unlock_iovec(vec, target_vec, count, !send);
3351      }
3352  out2:
3353      return ret;
3354  }
3355  
3356  static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3357                                 int flags, int send)
3358  {
3359      abi_long ret;
3360      struct target_msghdr *msgp;
3361  
3362      if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3363                            msgp,
3364                            target_msg,
3365                            send ? 1 : 0)) {
3366          return -TARGET_EFAULT;
3367      }
3368      ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3369      unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3370      return ret;
3371  }
3372  
3373  /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3374   * so it might not have this *mmsg-specific flag either.
3375   */
3376  #ifndef MSG_WAITFORONE
3377  #define MSG_WAITFORONE 0x10000
3378  #endif
3379  
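/*
 * sendmmsg()/recvmmsg() are emulated as a loop of single
 * do_sendrecvmsg_locked() calls: msg_len is filled in for every datagram
 * that completes, and the number of completed datagrams is returned if any
 * succeeded, otherwise the first error.
 */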
3380  static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3381                                  unsigned int vlen, unsigned int flags,
3382                                  int send)
3383  {
3384      struct target_mmsghdr *mmsgp;
3385      abi_long ret = 0;
3386      int i;
3387  
3388      if (vlen > UIO_MAXIOV) {
3389          vlen = UIO_MAXIOV;
3390      }
3391  
3392      mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3393      if (!mmsgp) {
3394          return -TARGET_EFAULT;
3395      }
3396  
3397      for (i = 0; i < vlen; i++) {
3398          ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3399          if (is_error(ret)) {
3400              break;
3401          }
3402          mmsgp[i].msg_len = tswap32(ret);
3403          /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3404          if (flags & MSG_WAITFORONE) {
3405              flags |= MSG_DONTWAIT;
3406          }
3407      }
3408  
3409      unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3410  
3411      /* Return number of datagrams sent if we sent any at all;
3412       * otherwise return the error.
3413       */
3414      if (i) {
3415          return i;
3416      }
3417      return ret;
3418  }
3419  
3420  /* do_accept4() Must return target values and target errnos. */
3421  static abi_long do_accept4(int fd, abi_ulong target_addr,
3422                             abi_ulong target_addrlen_addr, int flags)
3423  {
3424      socklen_t addrlen, ret_addrlen;
3425      void *addr;
3426      abi_long ret;
3427      int host_flags;
3428  
3429      if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3430          return -TARGET_EINVAL;
3431      }
3432  
3433      host_flags = 0;
3434      if (flags & TARGET_SOCK_NONBLOCK) {
3435          host_flags |= SOCK_NONBLOCK;
3436      }
3437      if (flags & TARGET_SOCK_CLOEXEC) {
3438          host_flags |= SOCK_CLOEXEC;
3439      }
3440  
3441      if (target_addr == 0) {
3442          return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3443      }
3444  
3445      /* Linux returns EFAULT if the addrlen pointer is invalid */
3446      if (get_user_u32(addrlen, target_addrlen_addr))
3447          return -TARGET_EFAULT;
3448  
3449      if ((int)addrlen < 0) {
3450          return -TARGET_EINVAL;
3451      }
3452  
3453      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3454          return -TARGET_EFAULT;
3455      }
3456  
3457      addr = alloca(addrlen);
3458  
3459      ret_addrlen = addrlen;
3460      ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3461      if (!is_error(ret)) {
3462          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3463          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3464              ret = -TARGET_EFAULT;
3465          }
3466      }
3467      return ret;
3468  }
3469  
3470  /* do_getpeername() Must return target values and target errnos. */
3471  static abi_long do_getpeername(int fd, abi_ulong target_addr,
3472                                 abi_ulong target_addrlen_addr)
3473  {
3474      socklen_t addrlen, ret_addrlen;
3475      void *addr;
3476      abi_long ret;
3477  
3478      if (get_user_u32(addrlen, target_addrlen_addr))
3479          return -TARGET_EFAULT;
3480  
3481      if ((int)addrlen < 0) {
3482          return -TARGET_EINVAL;
3483      }
3484  
3485      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3486          return -TARGET_EFAULT;
3487      }
3488  
3489      addr = alloca(addrlen);
3490  
3491      ret_addrlen = addrlen;
3492      ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3493      if (!is_error(ret)) {
3494          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3495          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3496              ret = -TARGET_EFAULT;
3497          }
3498      }
3499      return ret;
3500  }
3501  
3502  /* do_getsockname() Must return target values and target errnos. */
3503  static abi_long do_getsockname(int fd, abi_ulong target_addr,
3504                                 abi_ulong target_addrlen_addr)
3505  {
3506      socklen_t addrlen, ret_addrlen;
3507      void *addr;
3508      abi_long ret;
3509  
3510      if (get_user_u32(addrlen, target_addrlen_addr))
3511          return -TARGET_EFAULT;
3512  
3513      if ((int)addrlen < 0) {
3514          return -TARGET_EINVAL;
3515      }
3516  
3517      if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3518          return -TARGET_EFAULT;
3519      }
3520  
3521      addr = alloca(addrlen);
3522  
3523      ret_addrlen = addrlen;
3524      ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3525      if (!is_error(ret)) {
3526          host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3527          if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3528              ret = -TARGET_EFAULT;
3529          }
3530      }
3531      return ret;
3532  }
3533  
3534  /* do_socketpair() Must return target values and target errnos. */
3535  static abi_long do_socketpair(int domain, int type, int protocol,
3536                                abi_ulong target_tab_addr)
3537  {
3538      int tab[2];
3539      abi_long ret;
3540  
3541      target_to_host_sock_type(&type);
3542  
3543      ret = get_errno(socketpair(domain, type, protocol, tab));
3544      if (!is_error(ret)) {
3545          if (put_user_s32(tab[0], target_tab_addr)
3546              || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3547              ret = -TARGET_EFAULT;
3548      }
3549      return ret;
3550  }
3551  
3552  /* do_sendto() Must return target values and target errnos. */
3553  static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3554                            abi_ulong target_addr, socklen_t addrlen)
3555  {
3556      void *addr;
3557      void *host_msg;
3558      void *copy_msg = NULL;
3559      abi_long ret;
3560  
3561      if ((int)addrlen < 0) {
3562          return -TARGET_EINVAL;
3563      }
3564  
3565      host_msg = lock_user(VERIFY_READ, msg, len, 1);
3566      if (!host_msg)
3567          return -TARGET_EFAULT;
3568      if (fd_trans_target_to_host_data(fd)) {
3569          copy_msg = host_msg;
3570          host_msg = g_malloc(len);
3571          memcpy(host_msg, copy_msg, len);
3572          ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3573          if (ret < 0) {
3574              goto fail;
3575          }
3576      }
3577      if (target_addr) {
3578          addr = alloca(addrlen+1);
3579          ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3580          if (ret) {
3581              goto fail;
3582          }
3583          ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3584      } else {
3585          ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3586      }
3587  fail:
3588      if (copy_msg) {
3589          g_free(host_msg);
3590          host_msg = copy_msg;
3591      }
3592      unlock_user(host_msg, msg, 0);
3593      return ret;
3594  }
3595  
3596  /* do_recvfrom() Must return target values and target errnos. */
3597  static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3598                              abi_ulong target_addr,
3599                              abi_ulong target_addrlen)
3600  {
3601      socklen_t addrlen, ret_addrlen;
3602      void *addr;
3603      void *host_msg;
3604      abi_long ret;
3605  
3606      if (!msg) {
3607          host_msg = NULL;
3608      } else {
3609          host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3610          if (!host_msg) {
3611              return -TARGET_EFAULT;
3612          }
3613      }
3614      if (target_addr) {
3615          if (get_user_u32(addrlen, target_addrlen)) {
3616              ret = -TARGET_EFAULT;
3617              goto fail;
3618          }
3619          if ((int)addrlen < 0) {
3620              ret = -TARGET_EINVAL;
3621              goto fail;
3622          }
3623          addr = alloca(addrlen);
3624          ret_addrlen = addrlen;
3625          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3626                                        addr, &ret_addrlen));
3627      } else {
3628          addr = NULL; /* To keep compiler quiet.  */
3629          addrlen = 0; /* To keep compiler quiet.  */
3630          ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3631      }
3632      if (!is_error(ret)) {
3633          if (fd_trans_host_to_target_data(fd)) {
3634              abi_long trans;
3635              trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3636              if (is_error(trans)) {
3637                  ret = trans;
3638                  goto fail;
3639              }
3640          }
3641          if (target_addr) {
3642              host_to_target_sockaddr(target_addr, addr,
3643                                      MIN(addrlen, ret_addrlen));
3644              if (put_user_u32(ret_addrlen, target_addrlen)) {
3645                  ret = -TARGET_EFAULT;
3646                  goto fail;
3647              }
3648          }
3649          unlock_user(host_msg, msg, len);
3650      } else {
3651  fail:
3652          unlock_user(host_msg, msg, 0);
3653      }
3654      return ret;
3655  }
3656  
3657  #ifdef TARGET_NR_socketcall
3658  /* do_socketcall() must return target values and target errnos. */
3659  static abi_long do_socketcall(int num, abi_ulong vptr)
3660  {
3661      static const unsigned nargs[] = { /* number of arguments per operation */
3662          [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3663          [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3664          [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3665          [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3666          [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3667          [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3668          [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3669          [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3670          [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3671          [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3672          [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3673          [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3674          [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3675          [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3676          [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3677          [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3678          [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3679          [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3680          [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3681          [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3682      };
3683      abi_long a[6]; /* max 6 args */
3684      unsigned i;
3685  
3686      /* check the range of the first argument num */
3687      /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3688      if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3689          return -TARGET_EINVAL;
3690      }
3691      /* ensure we have space for args */
3692      if (nargs[num] > ARRAY_SIZE(a)) {
3693          return -TARGET_EINVAL;
3694      }
3695      /* collect the arguments in a[] according to nargs[] */
3696      for (i = 0; i < nargs[num]; ++i) {
3697          if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3698              return -TARGET_EFAULT;
3699          }
3700      }
3701      /* now when we have the args, invoke the appropriate underlying function */
3702      switch (num) {
3703      case TARGET_SYS_SOCKET: /* domain, type, protocol */
3704          return do_socket(a[0], a[1], a[2]);
3705      case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3706          return do_bind(a[0], a[1], a[2]);
3707      case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3708          return do_connect(a[0], a[1], a[2]);
3709      case TARGET_SYS_LISTEN: /* sockfd, backlog */
3710          return get_errno(listen(a[0], a[1]));
3711      case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3712          return do_accept4(a[0], a[1], a[2], 0);
3713      case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3714          return do_getsockname(a[0], a[1], a[2]);
3715      case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3716          return do_getpeername(a[0], a[1], a[2]);
3717      case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3718          return do_socketpair(a[0], a[1], a[2], a[3]);
3719      case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3720          return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3721      case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3722          return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3723      case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3724          return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3725      case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3726          return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3727      case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3728          return get_errno(shutdown(a[0], a[1]));
3729      case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3730          return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3731      case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3732          return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3733      case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3734          return do_sendrecvmsg(a[0], a[1], a[2], 1);
3735      case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3736          return do_sendrecvmsg(a[0], a[1], a[2], 0);
3737      case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3738          return do_accept4(a[0], a[1], a[2], a[3]);
3739      case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3740          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3741      case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3742          return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3743      default:
3744          qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3745          return -TARGET_EINVAL;
3746      }
3747  }
3748  #endif
3749  
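/*
 * System V IPC emulation follows: target<->host converters for the control
 * structures of semaphores, message queues and shared memory, plus the
 * do_semctl()/do_msgctl()/do_shmctl(), do_semtimedop(), do_msgsnd() and
 * do_msgrcv() wrappers used both by the individual syscalls and by the
 * legacy ipc() multiplexer.
 */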
3750  #ifndef TARGET_SEMID64_DS
3751  /* asm-generic version of this struct */
3752  struct target_semid64_ds
3753  {
3754    struct target_ipc_perm sem_perm;
3755    abi_ulong sem_otime;
3756  #if TARGET_ABI_BITS == 32
3757    abi_ulong __unused1;
3758  #endif
3759    abi_ulong sem_ctime;
3760  #if TARGET_ABI_BITS == 32
3761    abi_ulong __unused2;
3762  #endif
3763    abi_ulong sem_nsems;
3764    abi_ulong __unused3;
3765    abi_ulong __unused4;
3766  };
3767  #endif
3768  
3769  static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3770                                                 abi_ulong target_addr)
3771  {
3772      struct target_ipc_perm *target_ip;
3773      struct target_semid64_ds *target_sd;
3774  
3775      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3776          return -TARGET_EFAULT;
3777      target_ip = &(target_sd->sem_perm);
3778      host_ip->__key = tswap32(target_ip->__key);
3779      host_ip->uid = tswap32(target_ip->uid);
3780      host_ip->gid = tswap32(target_ip->gid);
3781      host_ip->cuid = tswap32(target_ip->cuid);
3782      host_ip->cgid = tswap32(target_ip->cgid);
3783  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3784      host_ip->mode = tswap32(target_ip->mode);
3785  #else
3786      host_ip->mode = tswap16(target_ip->mode);
3787  #endif
3788  #if defined(TARGET_PPC)
3789      host_ip->__seq = tswap32(target_ip->__seq);
3790  #else
3791      host_ip->__seq = tswap16(target_ip->__seq);
3792  #endif
3793      unlock_user_struct(target_sd, target_addr, 0);
3794      return 0;
3795  }
3796  
3797  static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3798                                                 struct ipc_perm *host_ip)
3799  {
3800      struct target_ipc_perm *target_ip;
3801      struct target_semid64_ds *target_sd;
3802  
3803      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804          return -TARGET_EFAULT;
3805      target_ip = &(target_sd->sem_perm);
3806      target_ip->__key = tswap32(host_ip->__key);
3807      target_ip->uid = tswap32(host_ip->uid);
3808      target_ip->gid = tswap32(host_ip->gid);
3809      target_ip->cuid = tswap32(host_ip->cuid);
3810      target_ip->cgid = tswap32(host_ip->cgid);
3811  #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812      target_ip->mode = tswap32(host_ip->mode);
3813  #else
3814      target_ip->mode = tswap16(host_ip->mode);
3815  #endif
3816  #if defined(TARGET_PPC)
3817      target_ip->__seq = tswap32(host_ip->__seq);
3818  #else
3819      target_ip->__seq = tswap16(host_ip->__seq);
3820  #endif
3821      unlock_user_struct(target_sd, target_addr, 1);
3822      return 0;
3823  }
3824  
3825  static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3826                                                 abi_ulong target_addr)
3827  {
3828      struct target_semid64_ds *target_sd;
3829  
3830      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3831          return -TARGET_EFAULT;
3832      if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3833          return -TARGET_EFAULT;
3834      host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3835      host_sd->sem_otime = tswapal(target_sd->sem_otime);
3836      host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3837      unlock_user_struct(target_sd, target_addr, 0);
3838      return 0;
3839  }
3840  
3841  static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3842                                                 struct semid_ds *host_sd)
3843  {
3844      struct target_semid64_ds *target_sd;
3845  
3846      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3847          return -TARGET_EFAULT;
3848      if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3849          return -TARGET_EFAULT;
3850      target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3851      target_sd->sem_otime = tswapal(host_sd->sem_otime);
3852      target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3853      unlock_user_struct(target_sd, target_addr, 1);
3854      return 0;
3855  }
3856  
3857  struct target_seminfo {
3858      int semmap;
3859      int semmni;
3860      int semmns;
3861      int semmnu;
3862      int semmsl;
3863      int semopm;
3864      int semume;
3865      int semusz;
3866      int semvmx;
3867      int semaem;
3868  };
3869  
3870  static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3871                                                struct seminfo *host_seminfo)
3872  {
3873      struct target_seminfo *target_seminfo;
3874      if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3875          return -TARGET_EFAULT;
3876      __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3877      __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3878      __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3879      __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3880      __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3881      __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3882      __put_user(host_seminfo->semume, &target_seminfo->semume);
3883      __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3884      __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3885      __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3886      unlock_user_struct(target_seminfo, target_addr, 1);
3887      return 0;
3888  }
3889  
3890  union semun {
3891  	int val;
3892  	struct semid_ds *buf;
3893  	unsigned short *array;
3894  	struct seminfo *__buf;
3895  };
3896  
3897  union target_semun {
3898  	int val;
3899  	abi_ulong buf;
3900  	abi_ulong array;
3901  	abi_ulong __buf;
3902  };
3903  
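/*
 * Copy the guest's array of semaphore values (used by GETALL/SETALL) into a
 * newly allocated host array; the number of semaphores is obtained with an
 * IPC_STAT on the semaphore set.  host_to_target_semarray() below copies
 * the values back and frees the host array.
 */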
3904  static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3905                                                 abi_ulong target_addr)
3906  {
3907      int nsems;
3908      unsigned short *array;
3909      union semun semun;
3910      struct semid_ds semid_ds;
3911      int i, ret;
3912  
3913      semun.buf = &semid_ds;
3914  
3915      ret = semctl(semid, 0, IPC_STAT, semun);
3916      if (ret == -1)
3917          return get_errno(ret);
3918  
3919      nsems = semid_ds.sem_nsems;
3920  
3921      *host_array = g_try_new(unsigned short, nsems);
3922      if (!*host_array) {
3923          return -TARGET_ENOMEM;
3924      }
3925      array = lock_user(VERIFY_READ, target_addr,
3926                        nsems*sizeof(unsigned short), 1);
3927      if (!array) {
3928          g_free(*host_array);
3929          return -TARGET_EFAULT;
3930      }
3931  
3932      for(i=0; i<nsems; i++) {
3933          __get_user((*host_array)[i], &array[i]);
3934      }
3935      unlock_user(array, target_addr, 0);
3936  
3937      return 0;
3938  }
3939  
3940  static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3941                                                 unsigned short **host_array)
3942  {
3943      int nsems;
3944      unsigned short *array;
3945      union semun semun;
3946      struct semid_ds semid_ds;
3947      int i, ret;
3948  
3949      semun.buf = &semid_ds;
3950  
3951      ret = semctl(semid, 0, IPC_STAT, semun);
3952      if (ret == -1)
3953          return get_errno(ret);
3954  
3955      nsems = semid_ds.sem_nsems;
3956  
3957      array = lock_user(VERIFY_WRITE, target_addr,
3958                        nsems*sizeof(unsigned short), 0);
3959      if (!array) {
              g_free(*host_array);
3960          return -TARGET_EFAULT;
          }
3961  
3962      for(i=0; i<nsems; i++) {
3963          __put_user((*host_array)[i], &array[i]);
3964      }
3965      g_free(*host_array);
3966      unlock_user(array, target_addr, 1);
3967  
3968      return 0;
3969  }
3970  
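/*
 * Emulate semctl(): decode the semun argument according to the command and
 * convert values, arrays and semid_ds/seminfo structures between target and
 * host representations around the host semctl() call.
 */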
3971  static inline abi_long do_semctl(int semid, int semnum, int cmd,
3972                                   abi_ulong target_arg)
3973  {
3974      union target_semun target_su = { .buf = target_arg };
3975      union semun arg;
3976      struct semid_ds dsarg;
3977      unsigned short *array = NULL;
3978      struct seminfo seminfo;
3979      abi_long ret = -TARGET_EINVAL;
3980      abi_long err;
3981      cmd &= 0xff;
3982  
3983      switch (cmd) {
3984          case GETVAL:
3985          case SETVAL:
3986              /* In 64 bit cross-endian situations, we will erroneously pick up
3987               * the wrong half of the union for the "val" element.  To rectify
3988               * this, the entire 8-byte structure is byteswapped, followed by
3989               * a swap of the 4 byte val field. In other cases, the data is
3990               * already in proper host byte order. */
3991              if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3992                  target_su.buf = tswapal(target_su.buf);
3993                  arg.val = tswap32(target_su.val);
3994              } else {
3995                  arg.val = target_su.val;
3996              }
3997              ret = get_errno(semctl(semid, semnum, cmd, arg));
3998              break;
3999          case GETALL:
4000          case SETALL:
4001              err = target_to_host_semarray(semid, &array, target_su.array);
4002              if (err)
4003                  return err;
4004              arg.array = array;
4005              ret = get_errno(semctl(semid, semnum, cmd, arg));
4006              err = host_to_target_semarray(semid, target_su.array, &array);
4007              if (err)
4008                  return err;
4009              break;
4010          case IPC_STAT:
4011          case IPC_SET:
4012          case SEM_STAT:
4013              err = target_to_host_semid_ds(&dsarg, target_su.buf);
4014              if (err)
4015                  return err;
4016              arg.buf = &dsarg;
4017              ret = get_errno(semctl(semid, semnum, cmd, arg));
4018              err = host_to_target_semid_ds(target_su.buf, &dsarg);
4019              if (err)
4020                  return err;
4021              break;
4022          case IPC_INFO:
4023          case SEM_INFO:
4024              arg.__buf = &seminfo;
4025              ret = get_errno(semctl(semid, semnum, cmd, arg));
4026              err = host_to_target_seminfo(target_su.__buf, &seminfo);
4027              if (err)
4028                  return err;
4029              break;
4030          case IPC_RMID:
4031          case GETPID:
4032          case GETNCNT:
4033          case GETZCNT:
4034              ret = get_errno(semctl(semid, semnum, cmd, NULL));
4035              break;
4036      }
4037  
4038      return ret;
4039  }
4040  
4041  struct target_sembuf {
4042      unsigned short sem_num;
4043      short sem_op;
4044      short sem_flg;
4045  };
4046  
4047  static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4048                                               abi_ulong target_addr,
4049                                               unsigned nsops)
4050  {
4051      struct target_sembuf *target_sembuf;
4052      int i;
4053  
4054      target_sembuf = lock_user(VERIFY_READ, target_addr,
4055                                nsops*sizeof(struct target_sembuf), 1);
4056      if (!target_sembuf)
4057          return -TARGET_EFAULT;
4058  
4059      for(i=0; i<nsops; i++) {
4060          __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4061          __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4062          __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4063      }
4064  
4065      unlock_user(target_sembuf, target_addr, 0);
4066  
4067      return 0;
4068  }
4069  
4070  #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4071      defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4072  
4073  /*
4074   * This macro is required to handle the s390 variants, which pass the
4075   * arguments in a different order than the default.
4076   */
4077  #ifdef __s390x__
4078  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4079    (__nsops), (__timeout), (__sops)
4080  #else
4081  #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4082    (__nsops), 0, (__sops), (__timeout)
4083  #endif
4084  
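/*
 * Common helper for semop(), semtimedop(), semtimedop_time64() and the
 * ipc() multiplexer: copy in the sembuf array (at most TARGET_SEMOPM
 * entries), convert the optional timeout from its 32- or 64-bit target
 * layout, and issue the operation through semtimedop() or sys_ipc,
 * whichever the host provides.
 */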
4085  static inline abi_long do_semtimedop(int semid,
4086                                       abi_long ptr,
4087                                       unsigned nsops,
4088                                       abi_long timeout, bool time64)
4089  {
4090      struct sembuf *sops;
4091      struct timespec ts, *pts = NULL;
4092      abi_long ret;
4093  
4094      if (timeout) {
4095          pts = &ts;
4096          if (time64) {
4097              if (target_to_host_timespec64(pts, timeout)) {
4098                  return -TARGET_EFAULT;
4099              }
4100          } else {
4101              if (target_to_host_timespec(pts, timeout)) {
4102                  return -TARGET_EFAULT;
4103              }
4104          }
4105      }
4106  
4107      if (nsops > TARGET_SEMOPM) {
4108          return -TARGET_E2BIG;
4109      }
4110  
4111      sops = g_new(struct sembuf, nsops);
4112  
4113      if (target_to_host_sembuf(sops, ptr, nsops)) {
4114          g_free(sops);
4115          return -TARGET_EFAULT;
4116      }
4117  
4118      ret = -TARGET_ENOSYS;
4119  #ifdef __NR_semtimedop
4120      ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4121  #endif
4122  #ifdef __NR_ipc
4123      if (ret == -TARGET_ENOSYS) {
4124          ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4125                                   SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4126      }
4127  #endif
4128      g_free(sops);
4129      return ret;
4130  }
4131  #endif
4132  
4133  struct target_msqid_ds
4134  {
4135      struct target_ipc_perm msg_perm;
4136      abi_ulong msg_stime;
4137  #if TARGET_ABI_BITS == 32
4138      abi_ulong __unused1;
4139  #endif
4140      abi_ulong msg_rtime;
4141  #if TARGET_ABI_BITS == 32
4142      abi_ulong __unused2;
4143  #endif
4144      abi_ulong msg_ctime;
4145  #if TARGET_ABI_BITS == 32
4146      abi_ulong __unused3;
4147  #endif
4148      abi_ulong __msg_cbytes;
4149      abi_ulong msg_qnum;
4150      abi_ulong msg_qbytes;
4151      abi_ulong msg_lspid;
4152      abi_ulong msg_lrpid;
4153      abi_ulong __unused4;
4154      abi_ulong __unused5;
4155  };
4156  
4157  static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4158                                                 abi_ulong target_addr)
4159  {
4160      struct target_msqid_ds *target_md;
4161  
4162      if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4163          return -TARGET_EFAULT;
4164      if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4165          return -TARGET_EFAULT;
4166      host_md->msg_stime = tswapal(target_md->msg_stime);
4167      host_md->msg_rtime = tswapal(target_md->msg_rtime);
4168      host_md->msg_ctime = tswapal(target_md->msg_ctime);
4169      host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4170      host_md->msg_qnum = tswapal(target_md->msg_qnum);
4171      host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4172      host_md->msg_lspid = tswapal(target_md->msg_lspid);
4173      host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4174      unlock_user_struct(target_md, target_addr, 0);
4175      return 0;
4176  }
4177  
4178  static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4179                                                 struct msqid_ds *host_md)
4180  {
4181      struct target_msqid_ds *target_md;
4182  
4183      if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4184          return -TARGET_EFAULT;
4185      if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4186          return -TARGET_EFAULT;
4187      target_md->msg_stime = tswapal(host_md->msg_stime);
4188      target_md->msg_rtime = tswapal(host_md->msg_rtime);
4189      target_md->msg_ctime = tswapal(host_md->msg_ctime);
4190      target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4191      target_md->msg_qnum = tswapal(host_md->msg_qnum);
4192      target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4193      target_md->msg_lspid = tswapal(host_md->msg_lspid);
4194      target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4195      unlock_user_struct(target_md, target_addr, 1);
4196      return 0;
4197  }
4198  
4199  struct target_msginfo {
4200      int msgpool;
4201      int msgmap;
4202      int msgmax;
4203      int msgmnb;
4204      int msgmni;
4205      int msgssz;
4206      int msgtql;
4207      unsigned short int msgseg;
4208  };
4209  
4210  static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4211                                                struct msginfo *host_msginfo)
4212  {
4213      struct target_msginfo *target_msginfo;
4214      if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4215          return -TARGET_EFAULT;
4216      __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4217      __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4218      __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4219      __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4220      __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4221      __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4222      __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4223      __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4224      unlock_user_struct(target_msginfo, target_addr, 1);
4225      return 0;
4226  }
4227  
4228  static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4229  {
4230      struct msqid_ds dsarg;
4231      struct msginfo msginfo;
4232      abi_long ret = -TARGET_EINVAL;
4233  
4234      cmd &= 0xff;
4235  
4236      switch (cmd) {
4237      case IPC_STAT:
4238      case IPC_SET:
4239      case MSG_STAT:
4240          if (target_to_host_msqid_ds(&dsarg,ptr))
4241              return -TARGET_EFAULT;
4242          ret = get_errno(msgctl(msgid, cmd, &dsarg));
4243          if (host_to_target_msqid_ds(ptr,&dsarg))
4244              return -TARGET_EFAULT;
4245          break;
4246      case IPC_RMID:
4247          ret = get_errno(msgctl(msgid, cmd, NULL));
4248          break;
4249      case IPC_INFO:
4250      case MSG_INFO:
4251          ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4252          if (host_to_target_msginfo(ptr, &msginfo))
4253              return -TARGET_EFAULT;
4254          break;
4255      }
4256  
4257      return ret;
4258  }
4259  
4260  struct target_msgbuf {
4261      abi_long mtype;
4262      char	mtext[1];
4263  };
4264  
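/*
 * Emulate msgsnd(): the message is copied into a host-sized msgbuf (mtype
 * is a host 'long' rather than the target's abi_long) before calling
 * msgsnd() or falling back to the sys_ipc multiplexer.
 */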
4265  static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4266                                   ssize_t msgsz, int msgflg)
4267  {
4268      struct target_msgbuf *target_mb;
4269      struct msgbuf *host_mb;
4270      abi_long ret = 0;
4271  
4272      if (msgsz < 0) {
4273          return -TARGET_EINVAL;
4274      }
4275  
4276      if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4277          return -TARGET_EFAULT;
4278      host_mb = g_try_malloc(msgsz + sizeof(long));
4279      if (!host_mb) {
4280          unlock_user_struct(target_mb, msgp, 0);
4281          return -TARGET_ENOMEM;
4282      }
4283      host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4284      memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4285      ret = -TARGET_ENOSYS;
4286  #ifdef __NR_msgsnd
4287      ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4288  #endif
4289  #ifdef __NR_ipc
4290      if (ret == -TARGET_ENOSYS) {
4291  #ifdef __s390x__
4292          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4293                                   host_mb));
4294  #else
4295          ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4296                                   host_mb, 0));
4297  #endif
4298      }
4299  #endif
4300      g_free(host_mb);
4301      unlock_user_struct(target_mb, msgp, 0);
4302  
4303      return ret;
4304  }
4305  
4306  #ifdef __NR_ipc
4307  #if defined(__sparc__)
4308  /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4309  #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4310  #elif defined(__s390x__)
4311  /* The s390 sys_ipc variant has only five parameters.  */
4312  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4313      ((long int[]){(long int)__msgp, __msgtyp})
4314  #else
4315  #define MSGRCV_ARGS(__msgp, __msgtyp) \
4316      ((long int[]){(long int)__msgp, __msgtyp}), 0
4317  #endif
4318  #endif
4319  
4320  static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4321                                   ssize_t msgsz, abi_long msgtyp,
4322                                   int msgflg)
4323  {
4324      struct target_msgbuf *target_mb;
4325      char *target_mtext;
4326      struct msgbuf *host_mb;
4327      abi_long ret = 0;
4328  
4329      if (msgsz < 0) {
4330          return -TARGET_EINVAL;
4331      }
4332  
4333      if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4334          return -TARGET_EFAULT;
4335  
4336      host_mb = g_try_malloc(msgsz + sizeof(long));
4337      if (!host_mb) {
4338          ret = -TARGET_ENOMEM;
4339          goto end;
4340      }
4341      ret = -TARGET_ENOSYS;
4342  #ifdef __NR_msgrcv
4343      ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4344  #endif
4345  #ifdef __NR_ipc
4346      if (ret == -TARGET_ENOSYS) {
4347          ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4348                          msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4349      }
4350  #endif
4351  
4352      if (ret > 0) {
4353          abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4354          target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4355          if (!target_mtext) {
4356              ret = -TARGET_EFAULT;
4357              goto end;
4358          }
4359          memcpy(target_mb->mtext, host_mb->mtext, ret);
4360          unlock_user(target_mtext, target_mtext_addr, ret);
4361      }
4362  
4363      target_mb->mtype = tswapal(host_mb->mtype);
4364  
4365  end:
4366      if (target_mb)
4367          unlock_user_struct(target_mb, msgp, 1);
4368      g_free(host_mb);
4369      return ret;
4370  }
4371  
4372  static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4373                                                 abi_ulong target_addr)
4374  {
4375      struct target_shmid_ds *target_sd;
4376  
4377      if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4378          return -TARGET_EFAULT;
4379      if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4380          return -TARGET_EFAULT;
4381      __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4382      __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4383      __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4384      __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4385      __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4386      __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4387      __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4388      unlock_user_struct(target_sd, target_addr, 0);
4389      return 0;
4390  }
4391  
4392  static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4393                                                 struct shmid_ds *host_sd)
4394  {
4395      struct target_shmid_ds *target_sd;
4396  
4397      if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4398          return -TARGET_EFAULT;
4399      if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4400          return -TARGET_EFAULT;
4401      __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402      __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4403      __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404      __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405      __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406      __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407      __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408      unlock_user_struct(target_sd, target_addr, 1);
4409      return 0;
4410  }
4411  
4412  struct  target_shminfo {
4413      abi_ulong shmmax;
4414      abi_ulong shmmin;
4415      abi_ulong shmmni;
4416      abi_ulong shmseg;
4417      abi_ulong shmall;
4418  };
4419  
4420  static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4421                                                struct shminfo *host_shminfo)
4422  {
4423      struct target_shminfo *target_shminfo;
4424      if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4425          return -TARGET_EFAULT;
4426      __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4427      __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4428      __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4429      __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4430      __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4431      unlock_user_struct(target_shminfo, target_addr, 1);
4432      return 0;
4433  }
4434  
4435  struct target_shm_info {
4436      int used_ids;
4437      abi_ulong shm_tot;
4438      abi_ulong shm_rss;
4439      abi_ulong shm_swp;
4440      abi_ulong swap_attempts;
4441      abi_ulong swap_successes;
4442  };
4443  
4444  static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4445                                                 struct shm_info *host_shm_info)
4446  {
4447      struct target_shm_info *target_shm_info;
4448      if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4449          return -TARGET_EFAULT;
4450      __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4451      __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4452      __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4453      __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4454      __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4455      __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4456      unlock_user_struct(target_shm_info, target_addr, 1);
4457      return 0;
4458  }
4459  
4460  static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4461  {
4462      struct shmid_ds dsarg;
4463      struct shminfo shminfo;
4464      struct shm_info shm_info;
4465      abi_long ret = -TARGET_EINVAL;
4466  
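      /* Only the low byte selects the command; mask off IPC_64 and other modifier bits. */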
4467      cmd &= 0xff;
4468  
4469      switch(cmd) {
4470      case IPC_STAT:
4471      case IPC_SET:
4472      case SHM_STAT:
4473          if (target_to_host_shmid_ds(&dsarg, buf))
4474              return -TARGET_EFAULT;
4475          ret = get_errno(shmctl(shmid, cmd, &dsarg));
4476          if (host_to_target_shmid_ds(buf, &dsarg))
4477              return -TARGET_EFAULT;
4478          break;
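          /*
           * For IPC_INFO and SHM_INFO the kernel fills a struct shminfo /
           * struct shm_info; the shmid_ds casts below only satisfy the
           * shmctl() prototype.
           */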
4479      case IPC_INFO:
4480          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4481          if (host_to_target_shminfo(buf, &shminfo))
4482              return -TARGET_EFAULT;
4483          break;
4484      case SHM_INFO:
4485          ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4486          if (host_to_target_shm_info(buf, &shm_info))
4487              return -TARGET_EFAULT;
4488          break;
4489      case IPC_RMID:
4490      case SHM_LOCK:
4491      case SHM_UNLOCK:
4492          ret = get_errno(shmctl(shmid, cmd, NULL));
4493          break;
4494      }
4495  
4496      return ret;
4497  }
4498  
4499  #ifdef TARGET_NR_ipc
4500  /* ??? This only works with linear mappings.  */
4501  /* do_ipc() must return target values and target errnos. */
4502  static abi_long do_ipc(CPUArchState *cpu_env,
4503                         unsigned int call, abi_long first,
4504                         abi_long second, abi_long third,
4505                         abi_long ptr, abi_long fifth)
4506  {
4507      int version;
4508      abi_long ret = 0;
4509  
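      /* The ipc() multiplexer encodes an ABI version in the top 16 bits of the call number. */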
4510      version = call >> 16;
4511      call &= 0xffff;
4512  
4513      switch (call) {
4514      case IPCOP_semop:
4515          ret = do_semtimedop(first, ptr, second, 0, false);
4516          break;
4517      case IPCOP_semtimedop:
4518      /*
4519       * The s390 sys_ipc variant has only five parameters instead of six
4520   * (as in the default variant); the only difference is the handling of
4521   * SEMTIMEDOP, where s390 passes the pointer to the struct timespec in
4522   * the third parameter while the generic variant uses the fifth.
4523       */
4524  #if defined(TARGET_S390X)
4525          ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4526  #else
4527          ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4528  #endif
4529          break;
4530  
4531      case IPCOP_semget:
4532          ret = get_errno(semget(first, second, third));
4533          break;
4534  
4535      case IPCOP_semctl: {
4536          /* The semun argument to semctl is passed by value, so dereference the
4537           * ptr argument. */
4538          abi_ulong atptr;
4539          get_user_ual(atptr, ptr);
4540          ret = do_semctl(first, second, third, atptr);
4541          break;
4542      }
4543  
4544      case IPCOP_msgget:
4545          ret = get_errno(msgget(first, second));
4546          break;
4547  
4548      case IPCOP_msgsnd:
4549          ret = do_msgsnd(first, ptr, second, third);
4550          break;
4551  
4552      case IPCOP_msgctl:
4553          ret = do_msgctl(first, second, ptr);
4554          break;
4555  
4556      case IPCOP_msgrcv:
4557          switch (version) {
4558          case 0:
4559              {
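                  /* Old (version 0) ABI: ptr points at a kludge struct holding the real msgp and msgtyp. */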
4560                  struct target_ipc_kludge {
4561                      abi_long msgp;
4562                      abi_long msgtyp;
4563                  } *tmp;
4564  
4565                  if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4566                      ret = -TARGET_EFAULT;
4567                      break;
4568                  }
4569  
4570                  ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4571  
4572                  unlock_user_struct(tmp, ptr, 0);
4573                  break;
4574              }
4575          default:
4576              ret = do_msgrcv(first, ptr, second, fifth, third);
4577          }
4578          break;
4579  
4580      case IPCOP_shmat:
4581          switch (version) {
4582          default:
4583          {
4584              abi_ulong raddr;
4585              raddr = target_shmat(cpu_env, first, ptr, second);
4586              if (is_error(raddr))
4587                  return get_errno(raddr);
4588              if (put_user_ual(raddr, third))
4589                  return -TARGET_EFAULT;
4590              break;
4591          }
4592          case 1:
4593              ret = -TARGET_EINVAL;
4594              break;
4595          }
4596  	break;
4597      case IPCOP_shmdt:
4598          ret = target_shmdt(ptr);
4599  	break;
4600  
4601      case IPCOP_shmget:
4602  	/* IPC_* flag values are the same on all linux platforms */
4603  	ret = get_errno(shmget(first, second, third));
4604  	break;
4605  
4606  	/* IPC_* and SHM_* command values are the same on all linux platforms */
4607      case IPCOP_shmctl:
4608          ret = do_shmctl(first, second, ptr);
4609          break;
4610      default:
4611          qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4612                        call, version);
4613  	ret = -TARGET_ENOSYS;
4614  	break;
4615      }
4616      return ret;
4617  }
4618  #endif
4619  
4620  /* kernel structure types definitions */
4621  
4622  #define STRUCT(name, ...) STRUCT_ ## name,
4623  #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4624  enum {
4625  #include "syscall_types.h"
4626  STRUCT_MAX
4627  };
4628  #undef STRUCT
4629  #undef STRUCT_SPECIAL
4630  
4631  #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4632  #define STRUCT_SPECIAL(name)
4633  #include "syscall_types.h"
4634  #undef STRUCT
4635  #undef STRUCT_SPECIAL
4636  
4637  #define MAX_STRUCT_SIZE 4096
4638  
4639  #ifdef CONFIG_FIEMAP
4640  /* So fiemap access checks don't overflow on 32 bit systems.
4641   * This is very slightly smaller than the limit imposed by
4642   * the underlying kernel.
4643   */
4644  #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4645                              / sizeof(struct fiemap_extent))
4646  
4647  static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4648                                         int fd, int cmd, abi_long arg)
4649  {
4650      /* The parameter for this ioctl is a struct fiemap followed
4651       * by an array of struct fiemap_extent whose size is set
4652       * in fiemap->fm_extent_count. The array is filled in by the
4653       * ioctl.
4654       */
4655      int target_size_in, target_size_out;
4656      struct fiemap *fm;
4657      const argtype *arg_type = ie->arg_type;
4658      const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4659      void *argptr, *p;
4660      abi_long ret;
4661      int i, extent_size = thunk_type_size(extent_arg_type, 0);
4662      uint32_t outbufsz;
4663      int free_fm = 0;
4664  
4665      assert(arg_type[0] == TYPE_PTR);
4666      assert(ie->access == IOC_RW);
4667      arg_type++;
4668      target_size_in = thunk_type_size(arg_type, 0);
4669      argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4670      if (!argptr) {
4671          return -TARGET_EFAULT;
4672      }
4673      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4674      unlock_user(argptr, arg, 0);
4675      fm = (struct fiemap *)buf_temp;
4676      if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4677          return -TARGET_EINVAL;
4678      }
4679  
4680      outbufsz = sizeof (*fm) +
4681          (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4682  
4683      if (outbufsz > MAX_STRUCT_SIZE) {
4684          /* We can't fit all the extents into the fixed size buffer.
4685           * Allocate one that is large enough and use it instead.
4686           */
4687          fm = g_try_malloc(outbufsz);
4688          if (!fm) {
4689              return -TARGET_ENOMEM;
4690          }
4691          memcpy(fm, buf_temp, sizeof(struct fiemap));
4692          free_fm = 1;
4693      }
4694      ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4695      if (!is_error(ret)) {
4696          target_size_out = target_size_in;
4697          /* An extent_count of 0 means we were only counting the extents
4698           * so there are no structs to copy
4699           */
4700          if (fm->fm_extent_count != 0) {
4701              target_size_out += fm->fm_mapped_extents * extent_size;
4702          }
4703          argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4704          if (!argptr) {
4705              ret = -TARGET_EFAULT;
4706          } else {
4707              /* Convert the struct fiemap */
4708              thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4709              if (fm->fm_extent_count != 0) {
4710                  p = argptr + target_size_in;
4711                  /* ...and then all the struct fiemap_extents */
4712                  for (i = 0; i < fm->fm_mapped_extents; i++) {
4713                      thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4714                                    THUNK_TARGET);
4715                      p += extent_size;
4716                  }
4717              }
4718              unlock_user(argptr, arg, target_size_out);
4719          }
4720      }
4721      if (free_fm) {
4722          g_free(fm);
4723      }
4724      return ret;
4725  }
4726  #endif
4727  
4728  static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4729                                  int fd, int cmd, abi_long arg)
4730  {
4731      const argtype *arg_type = ie->arg_type;
4732      int target_size;
4733      void *argptr;
4734      int ret;
4735      struct ifconf *host_ifconf;
4736      uint32_t outbufsz;
4737      const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4738      const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4739      int target_ifreq_size;
4740      int nb_ifreq;
4741      int free_buf = 0;
4742      int i;
4743      int target_ifc_len;
4744      abi_long target_ifc_buf;
4745      int host_ifc_len;
4746      char *host_ifc_buf;
4747  
4748      assert(arg_type[0] == TYPE_PTR);
4749      assert(ie->access == IOC_RW);
4750  
4751      arg_type++;
4752      target_size = thunk_type_size(arg_type, 0);
4753  
4754      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4755      if (!argptr)
4756          return -TARGET_EFAULT;
4757      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4758      unlock_user(argptr, arg, 0);
4759  
4760      host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4761      target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4762      target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4763  
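      /* ifc_len counts target-sized ifreq records; rescale it to host-sized
       * records for the host ioctl and convert back afterwards. */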
4764      if (target_ifc_buf != 0) {
4765          target_ifc_len = host_ifconf->ifc_len;
4766          nb_ifreq = target_ifc_len / target_ifreq_size;
4767          host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4768  
4769          outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4770          if (outbufsz > MAX_STRUCT_SIZE) {
4771              /*
4772               * We can't fit all the ifreq entries into the fixed-size buffer.
4773               * Allocate one that is large enough and use it instead.
4774               */
4775              host_ifconf = g_try_malloc(outbufsz);
4776              if (!host_ifconf) {
4777                  return -TARGET_ENOMEM;
4778              }
4779              memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4780              free_buf = 1;
4781          }
4782          host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4783  
4784          host_ifconf->ifc_len = host_ifc_len;
4785      } else {
4786        host_ifc_buf = NULL;
4787      }
4788      host_ifconf->ifc_buf = host_ifc_buf;
4789  
4790      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4791      if (!is_error(ret)) {
4792  	/* convert host ifc_len to target ifc_len */
4793  
4794          nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4795          target_ifc_len = nb_ifreq * target_ifreq_size;
4796          host_ifconf->ifc_len = target_ifc_len;
4797  
4798  	/* restore target ifc_buf */
4799  
4800          host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4801  
4802  	/* copy struct ifconf to target user */
4803  
4804          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4805          if (!argptr)
4806              return -TARGET_EFAULT;
4807          thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4808          unlock_user(argptr, arg, target_size);
4809  
4810          if (target_ifc_buf != 0) {
4811              /* copy ifreq[] to target user */
4812              argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4813              for (i = 0; i < nb_ifreq ; i++) {
4814                  thunk_convert(argptr + i * target_ifreq_size,
4815                                host_ifc_buf + i * sizeof(struct ifreq),
4816                                ifreq_arg_type, THUNK_TARGET);
4817              }
4818              unlock_user(argptr, target_ifc_buf, target_ifc_len);
4819          }
4820      }
4821  
4822      if (free_buf) {
4823          g_free(host_ifconf);
4824      }
4825  
4826      return ret;
4827  }
4828  
4829  #if defined(CONFIG_USBFS)
4830  #if HOST_LONG_BITS > 64
4831  #error USBDEVFS thunks do not support >64 bit hosts yet.
4832  #endif
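/* Pairs a host usbdevfs_urb with the guest addresses it was built from, so the
 * guest URB and its data buffer can be updated when the URB is reaped. */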
4833  struct live_urb {
4834      uint64_t target_urb_adr;
4835      uint64_t target_buf_adr;
4836      char *target_buf_ptr;
4837      struct usbdevfs_urb host_urb;
4838  };
4839  
4840  static GHashTable *usbdevfs_urb_hashtable(void)
4841  {
4842      static GHashTable *urb_hashtable;
4843  
4844      if (!urb_hashtable) {
4845          urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4846      }
4847      return urb_hashtable;
4848  }
4849  
4850  static void urb_hashtable_insert(struct live_urb *urb)
4851  {
4852      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4853      g_hash_table_insert(urb_hashtable, urb, urb);
4854  }
4855  
4856  static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4857  {
4858      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4859      return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4860  }
4861  
4862  static void urb_hashtable_remove(struct live_urb *urb)
4863  {
4864      GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4865      g_hash_table_remove(urb_hashtable, urb);
4866  }
4867  
4868  static abi_long
4869  do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4870                            int fd, int cmd, abi_long arg)
4871  {
4872      const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4873      const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4874      struct live_urb *lurb;
4875      void *argptr;
4876      uint64_t hurb;
4877      int target_size;
4878      uintptr_t target_urb_adr;
4879      abi_long ret;
4880  
4881      target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4882  
4883      memset(buf_temp, 0, sizeof(uint64_t));
4884      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4885      if (is_error(ret)) {
4886          return ret;
4887      }
4888  
4889      memcpy(&hurb, buf_temp, sizeof(uint64_t));
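      /* The kernel handed back a pointer to host_urb; recover the enclosing live_urb (container_of-style). */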
4890      lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4891      if (!lurb->target_urb_adr) {
4892          return -TARGET_EFAULT;
4893      }
4894      urb_hashtable_remove(lurb);
4895      unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4896          lurb->host_urb.buffer_length);
4897      lurb->target_buf_ptr = NULL;
4898  
4899      /* restore the guest buffer pointer */
4900      lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4901  
4902      /* update the guest urb struct */
4903      argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4904      if (!argptr) {
4905          g_free(lurb);
4906          return -TARGET_EFAULT;
4907      }
4908      thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4909      unlock_user(argptr, lurb->target_urb_adr, target_size);
4910  
4911      target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4912      /* write back the urb handle */
4913      argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4914      if (!argptr) {
4915          g_free(lurb);
4916          return -TARGET_EFAULT;
4917      }
4918  
4919      /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4920      target_urb_adr = lurb->target_urb_adr;
4921      thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4922      unlock_user(argptr, arg, target_size);
4923  
4924      g_free(lurb);
4925      return ret;
4926  }
4927  
4928  static abi_long
4929  do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4930                               uint8_t *buf_temp __attribute__((unused)),
4931                               int fd, int cmd, abi_long arg)
4932  {
4933      struct live_urb *lurb;
4934  
4935      /* map target address back to host URB with metadata. */
4936      lurb = urb_hashtable_lookup(arg);
4937      if (!lurb) {
4938          return -TARGET_EFAULT;
4939      }
4940      return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4941  }
4942  
4943  static abi_long
4944  do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4945                              int fd, int cmd, abi_long arg)
4946  {
4947      const argtype *arg_type = ie->arg_type;
4948      int target_size;
4949      abi_long ret;
4950      void *argptr;
4951      int rw_dir;
4952      struct live_urb *lurb;
4953  
4954      /*
4955       * each submitted URB needs to map to a unique ID for the
4956       * kernel, and that unique ID needs to be a pointer to
4957       * host memory.  hence, we need to malloc for each URB.
4958       * isochronous transfers have a variable length struct.
4959       */
4960      arg_type++;
4961      target_size = thunk_type_size(arg_type, THUNK_TARGET);
4962  
4963      /* construct host copy of urb and metadata */
4964      lurb = g_try_new0(struct live_urb, 1);
4965      if (!lurb) {
4966          return -TARGET_ENOMEM;
4967      }
4968  
4969      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4970      if (!argptr) {
4971          g_free(lurb);
4972          return -TARGET_EFAULT;
4973      }
4974      thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4975      unlock_user(argptr, arg, 0);
4976  
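      /* Record the guest URB and buffer addresses; the guest URB address doubles
       * as the hash key used by REAPURB/DISCARDURB. */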
4977      lurb->target_urb_adr = arg;
4978      lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4979  
4980      /* buffer space used depends on endpoint type so lock the entire buffer */
4981      /* control type urbs should check the buffer contents for true direction */
4982      rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4983      lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4984          lurb->host_urb.buffer_length, 1);
4985      if (lurb->target_buf_ptr == NULL) {
4986          g_free(lurb);
4987          return -TARGET_EFAULT;
4988      }
4989  
4990      /* update buffer pointer in host copy */
4991      lurb->host_urb.buffer = lurb->target_buf_ptr;
4992  
4993      ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4994      if (is_error(ret)) {
4995          unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4996          g_free(lurb);
4997      } else {
4998          urb_hashtable_insert(lurb);
4999      }
5000  
5001      return ret;
5002  }
5003  #endif /* CONFIG_USBFS */
5004  
5005  static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5006                              int cmd, abi_long arg)
5007  {
5008      void *argptr;
5009      struct dm_ioctl *host_dm;
5010      abi_long guest_data;
5011      uint32_t guest_data_size;
5012      int target_size;
5013      const argtype *arg_type = ie->arg_type;
5014      abi_long ret;
5015      void *big_buf = NULL;
5016      char *host_data;
5017  
5018      arg_type++;
5019      target_size = thunk_type_size(arg_type, 0);
5020      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5021      if (!argptr) {
5022          ret = -TARGET_EFAULT;
5023          goto out;
5024      }
5025      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5026      unlock_user(argptr, arg, 0);
5027  
5028      /* buf_temp is too small, so fetch things into a bigger buffer */
5029      big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5030      memcpy(big_buf, buf_temp, target_size);
5031      buf_temp = big_buf;
5032      host_dm = big_buf;
5033  
5034      guest_data = arg + host_dm->data_start;
5035      if ((guest_data - arg) < 0) {
5036          ret = -TARGET_EINVAL;
5037          goto out;
5038      }
5039      guest_data_size = host_dm->data_size - host_dm->data_start;
5040      host_data = (char*)host_dm + host_dm->data_start;
5041  
5042      argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5043      if (!argptr) {
5044          ret = -TARGET_EFAULT;
5045          goto out;
5046      }
5047  
5048      switch (ie->host_cmd) {
5049      case DM_REMOVE_ALL:
5050      case DM_LIST_DEVICES:
5051      case DM_DEV_CREATE:
5052      case DM_DEV_REMOVE:
5053      case DM_DEV_SUSPEND:
5054      case DM_DEV_STATUS:
5055      case DM_DEV_WAIT:
5056      case DM_TABLE_STATUS:
5057      case DM_TABLE_CLEAR:
5058      case DM_TABLE_DEPS:
5059      case DM_LIST_VERSIONS:
5060          /* no input data */
5061          break;
5062      case DM_DEV_RENAME:
5063      case DM_DEV_SET_GEOMETRY:
5064          /* data contains only strings */
5065          memcpy(host_data, argptr, guest_data_size);
5066          break;
5067      case DM_TARGET_MSG:
5068          memcpy(host_data, argptr, guest_data_size);
5069          *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5070          break;
5071      case DM_TABLE_LOAD:
5072      {
5073          void *gspec = argptr;
5074          void *cur_data = host_data;
5075          const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5076          int spec_size = thunk_type_size(dm_arg_type, 0);
5077          int i;
5078  
5079          for (i = 0; i < host_dm->target_count; i++) {
5080              struct dm_target_spec *spec = cur_data;
5081              uint32_t next;
5082              int slen;
5083  
5084              thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5085              slen = strlen((char*)gspec + spec_size) + 1;
5086              next = spec->next;
5087              spec->next = sizeof(*spec) + slen;
5088              strcpy((char*)&spec[1], gspec + spec_size);
5089              gspec += next;
5090              cur_data += spec->next;
5091          }
5092          break;
5093      }
5094      default:
5095          ret = -TARGET_EINVAL;
5096          unlock_user(argptr, guest_data, 0);
5097          goto out;
5098      }
5099      unlock_user(argptr, guest_data, 0);
5100  
5101      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5102      if (!is_error(ret)) {
5103          guest_data = arg + host_dm->data_start;
5104          guest_data_size = host_dm->data_size - host_dm->data_start;
5105          argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5106          switch (ie->host_cmd) {
5107          case DM_REMOVE_ALL:
5108          case DM_DEV_CREATE:
5109          case DM_DEV_REMOVE:
5110          case DM_DEV_RENAME:
5111          case DM_DEV_SUSPEND:
5112          case DM_DEV_STATUS:
5113          case DM_TABLE_LOAD:
5114          case DM_TABLE_CLEAR:
5115          case DM_TARGET_MSG:
5116          case DM_DEV_SET_GEOMETRY:
5117              /* no return data */
5118              break;
5119          case DM_LIST_DEVICES:
5120          {
5121              struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5122              uint32_t remaining_data = guest_data_size;
5123              void *cur_data = argptr;
5124              const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5125              int nl_size = 12; /* can't use thunk_size due to alignment */
5126  
5127              while (1) {
5128                  uint32_t next = nl->next;
5129                  if (next) {
5130                      nl->next = nl_size + (strlen(nl->name) + 1);
5131                  }
5132                  if (remaining_data < nl->next) {
5133                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5134                      break;
5135                  }
5136                  thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5137                  strcpy(cur_data + nl_size, nl->name);
5138                  cur_data += nl->next;
5139                  remaining_data -= nl->next;
5140                  if (!next) {
5141                      break;
5142                  }
5143                  nl = (void*)nl + next;
5144              }
5145              break;
5146          }
5147          case DM_DEV_WAIT:
5148          case DM_TABLE_STATUS:
5149          {
5150              struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5151              void *cur_data = argptr;
5152              const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5153              int spec_size = thunk_type_size(dm_arg_type, 0);
5154              int i;
5155  
5156              for (i = 0; i < host_dm->target_count; i++) {
5157                  uint32_t next = spec->next;
5158                  int slen = strlen((char*)&spec[1]) + 1;
5159                  spec->next = (cur_data - argptr) + spec_size + slen;
5160                  if (guest_data_size < spec->next) {
5161                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5162                      break;
5163                  }
5164                  thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5165                  strcpy(cur_data + spec_size, (char*)&spec[1]);
5166                  cur_data = argptr + spec->next;
5167                  spec = (void*)host_dm + host_dm->data_start + next;
5168              }
5169              break;
5170          }
5171          case DM_TABLE_DEPS:
5172          {
5173              void *hdata = (void*)host_dm + host_dm->data_start;
5174              int count = *(uint32_t*)hdata;
5175              uint64_t *hdev = hdata + 8;
5176              uint64_t *gdev = argptr + 8;
5177              int i;
5178  
5179              *(uint32_t*)argptr = tswap32(count);
5180              for (i = 0; i < count; i++) {
5181                  *gdev = tswap64(*hdev);
5182                  gdev++;
5183                  hdev++;
5184              }
5185              break;
5186          }
5187          case DM_LIST_VERSIONS:
5188          {
5189              struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5190              uint32_t remaining_data = guest_data_size;
5191              void *cur_data = argptr;
5192              const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5193              int vers_size = thunk_type_size(dm_arg_type, 0);
5194  
5195              while (1) {
5196                  uint32_t next = vers->next;
5197                  if (next) {
5198                      vers->next = vers_size + (strlen(vers->name) + 1);
5199                  }
5200                  if (remaining_data < vers->next) {
5201                      host_dm->flags |= DM_BUFFER_FULL_FLAG;
5202                      break;
5203                  }
5204                  thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5205                  strcpy(cur_data + vers_size, vers->name);
5206                  cur_data += vers->next;
5207                  remaining_data -= vers->next;
5208                  if (!next) {
5209                      break;
5210                  }
5211                  vers = (void*)vers + next;
5212              }
5213              break;
5214          }
5215          default:
5216              unlock_user(argptr, guest_data, 0);
5217              ret = -TARGET_EINVAL;
5218              goto out;
5219          }
5220          unlock_user(argptr, guest_data, guest_data_size);
5221  
5222          argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5223          if (!argptr) {
5224              ret = -TARGET_EFAULT;
5225              goto out;
5226          }
5227          thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5228          unlock_user(argptr, arg, target_size);
5229      }
5230  out:
5231      g_free(big_buf);
5232      return ret;
5233  }
5234  
5235  static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5236                                 int cmd, abi_long arg)
5237  {
5238      void *argptr;
5239      int target_size;
5240      const argtype *arg_type = ie->arg_type;
5241      const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5242      abi_long ret;
5243  
5244      struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5245      struct blkpg_partition host_part;
5246  
5247      /* Read and convert blkpg */
5248      arg_type++;
5249      target_size = thunk_type_size(arg_type, 0);
5250      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5251      if (!argptr) {
5252          ret = -TARGET_EFAULT;
5253          goto out;
5254      }
5255      thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5256      unlock_user(argptr, arg, 0);
5257  
5258      switch (host_blkpg->op) {
5259      case BLKPG_ADD_PARTITION:
5260      case BLKPG_DEL_PARTITION:
5261          /* payload is struct blkpg_partition */
5262          break;
5263      default:
5264          /* Unknown opcode */
5265          ret = -TARGET_EINVAL;
5266          goto out;
5267      }
5268  
5269      /* Read and convert blkpg->data */
5270      arg = (abi_long)(uintptr_t)host_blkpg->data;
5271      target_size = thunk_type_size(part_arg_type, 0);
5272      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5273      if (!argptr) {
5274          ret = -TARGET_EFAULT;
5275          goto out;
5276      }
5277      thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5278      unlock_user(argptr, arg, 0);
5279  
5280      /* Swizzle the data pointer to our local copy and call! */
5281      host_blkpg->data = &host_part;
5282      ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5283  
5284  out:
5285      return ret;
5286  }
5287  
5288  static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5289                                  int fd, int cmd, abi_long arg)
5290  {
5291      const argtype *arg_type = ie->arg_type;
5292      const StructEntry *se;
5293      const argtype *field_types;
5294      const int *dst_offsets, *src_offsets;
5295      int target_size;
5296      void *argptr;
5297      abi_ulong *target_rt_dev_ptr = NULL;
5298      unsigned long *host_rt_dev_ptr = NULL;
5299      abi_long ret;
5300      int i;
5301  
5302      assert(ie->access == IOC_W);
5303      assert(*arg_type == TYPE_PTR);
5304      arg_type++;
5305      assert(*arg_type == TYPE_STRUCT);
5306      target_size = thunk_type_size(arg_type, 0);
5307      argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5308      if (!argptr) {
5309          return -TARGET_EFAULT;
5310      }
5311      arg_type++;
5312      assert(*arg_type == (int)STRUCT_rtentry);
5313      se = struct_entries + *arg_type++;
5314      assert(se->convert[0] == NULL);
5315      /* convert struct here to be able to catch rt_dev string */
5316      field_types = se->field_types;
5317      dst_offsets = se->field_offsets[THUNK_HOST];
5318      src_offsets = se->field_offsets[THUNK_TARGET];
5319      for (i = 0; i < se->nb_fields; i++) {
5320          if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5321              assert(*field_types == TYPE_PTRVOID);
5322              target_rt_dev_ptr = argptr + src_offsets[i];
5323              host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5324              if (*target_rt_dev_ptr != 0) {
5325                  *host_rt_dev_ptr = (unsigned long)lock_user_string(
5326                                                    tswapal(*target_rt_dev_ptr));
5327                  if (!*host_rt_dev_ptr) {
5328                      unlock_user(argptr, arg, 0);
5329                      return -TARGET_EFAULT;
5330                  }
5331              } else {
5332                  *host_rt_dev_ptr = 0;
5333              }
5334              field_types++;
5335              continue;
5336          }
5337          field_types = thunk_convert(buf_temp + dst_offsets[i],
5338                                      argptr + src_offsets[i],
5339                                      field_types, THUNK_HOST);
5340      }
5341      unlock_user(argptr, arg, 0);
5342  
5343      ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5344  
5345      assert(host_rt_dev_ptr != NULL);
5346      assert(target_rt_dev_ptr != NULL);
5347      if (*host_rt_dev_ptr != 0) {
5348          unlock_user((void *)*host_rt_dev_ptr,
5349                      *target_rt_dev_ptr, 0);
5350      }
5351      return ret;
5352  }
5353  
5354  static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5355                                       int fd, int cmd, abi_long arg)
5356  {
5357      int sig = target_to_host_signal(arg);
5358      return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5359  }
5360  
5361  static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5362                                      int fd, int cmd, abi_long arg)
5363  {
5364      struct timeval tv;
5365      abi_long ret;
5366  
5367      ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5368      if (is_error(ret)) {
5369          return ret;
5370      }
5371  
5372      if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5373          if (copy_to_user_timeval(arg, &tv)) {
5374              return -TARGET_EFAULT;
5375          }
5376      } else {
5377          if (copy_to_user_timeval64(arg, &tv)) {
5378              return -TARGET_EFAULT;
5379          }
5380      }
5381  
5382      return ret;
5383  }
5384  
5385  static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5386                                        int fd, int cmd, abi_long arg)
5387  {
5388      struct timespec ts;
5389      abi_long ret;
5390  
5391      ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5392      if (is_error(ret)) {
5393          return ret;
5394      }
5395  
5396      if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5397          if (host_to_target_timespec(arg, &ts)) {
5398              return -TARGET_EFAULT;
5399          }
5400      } else {
5401          if (host_to_target_timespec64(arg, &ts)) {
5402              return -TARGET_EFAULT;
5403          }
5404      }
5405  
5406      return ret;
5407  }
5408  
5409  #ifdef TIOCGPTPEER
5410  static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5411                                       int fd, int cmd, abi_long arg)
5412  {
5413      int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5414      return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5415  }
5416  #endif
5417  
5418  #ifdef HAVE_DRM_H
5419  
5420  static void unlock_drm_version(struct drm_version *host_ver,
5421                                 struct target_drm_version *target_ver,
5422                                 bool copy)
5423  {
5424      unlock_user(host_ver->name, target_ver->name,
5425                                  copy ? host_ver->name_len : 0);
5426      unlock_user(host_ver->date, target_ver->date,
5427                                  copy ? host_ver->date_len : 0);
5428      unlock_user(host_ver->desc, target_ver->desc,
5429                                  copy ? host_ver->desc_len : 0);
5430  }
5431  
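/* Lock the guest-supplied name/date/desc buffers so DRM_IOCTL_VERSION can write
 * the version strings directly into guest memory. */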
5432  static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5433                                            struct target_drm_version *target_ver)
5434  {
5435      memset(host_ver, 0, sizeof(*host_ver));
5436  
5437      __get_user(host_ver->name_len, &target_ver->name_len);
5438      if (host_ver->name_len) {
5439          host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5440                                     target_ver->name_len, 0);
5441          if (!host_ver->name) {
5442              return -EFAULT;
5443          }
5444      }
5445  
5446      __get_user(host_ver->date_len, &target_ver->date_len);
5447      if (host_ver->date_len) {
5448          host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5449                                     target_ver->date_len, 0);
5450          if (!host_ver->date) {
5451              goto err;
5452          }
5453      }
5454  
5455      __get_user(host_ver->desc_len, &target_ver->desc_len);
5456      if (host_ver->desc_len) {
5457          host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5458                                     target_ver->desc_len, 0);
5459          if (!host_ver->desc) {
5460              goto err;
5461          }
5462      }
5463  
5464      return 0;
5465  err:
5466      unlock_drm_version(host_ver, target_ver, false);
5467      return -EFAULT;
5468  }
5469  
5470  static inline void host_to_target_drmversion(
5471                                            struct target_drm_version *target_ver,
5472                                            struct drm_version *host_ver)
5473  {
5474      __put_user(host_ver->version_major, &target_ver->version_major);
5475      __put_user(host_ver->version_minor, &target_ver->version_minor);
5476      __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5477      __put_user(host_ver->name_len, &target_ver->name_len);
5478      __put_user(host_ver->date_len, &target_ver->date_len);
5479      __put_user(host_ver->desc_len, &target_ver->desc_len);
5480      unlock_drm_version(host_ver, target_ver, true);
5481  }
5482  
5483  static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5484                               int fd, int cmd, abi_long arg)
5485  {
5486      struct drm_version *ver;
5487      struct target_drm_version *target_ver;
5488      abi_long ret;
5489  
5490      switch (ie->host_cmd) {
5491      case DRM_IOCTL_VERSION:
5492          if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5493              return -TARGET_EFAULT;
5494          }
5495          ver = (struct drm_version *)buf_temp;
5496          ret = target_to_host_drmversion(ver, target_ver);
5497          if (!is_error(ret)) {
5498              ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5499              if (is_error(ret)) {
5500                  unlock_drm_version(ver, target_ver, false);
5501              } else {
5502                  host_to_target_drmversion(target_ver, ver);
5503              }
5504          }
5505          unlock_user_struct(target_ver, arg, 0);
5506          return ret;
5507      }
5508      return -TARGET_ENOSYS;
5509  }
5510  
5511  static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5512                                             struct drm_i915_getparam *gparam,
5513                                             int fd, abi_long arg)
5514  {
5515      abi_long ret;
5516      int value;
5517      struct target_drm_i915_getparam *target_gparam;
5518  
5519      if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5520          return -TARGET_EFAULT;
5521      }
5522  
5523      __get_user(gparam->param, &target_gparam->param);
5524      gparam->value = &value;
5525      ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5526      put_user_s32(value, target_gparam->value);
5527  
5528      unlock_user_struct(target_gparam, arg, 0);
5529      return ret;
5530  }
5531  
5532  static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5533                                    int fd, int cmd, abi_long arg)
5534  {
5535      switch (ie->host_cmd) {
5536      case DRM_IOCTL_I915_GETPARAM:
5537          return do_ioctl_drm_i915_getparam(ie,
5538                                            (struct drm_i915_getparam *)buf_temp,
5539                                            fd, arg);
5540      default:
5541          return -TARGET_ENOSYS;
5542      }
5543  }
5544  
5545  #endif
5546  
5547  static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5548                                          int fd, int cmd, abi_long arg)
5549  {
5550      struct tun_filter *filter = (struct tun_filter *)buf_temp;
5551      struct tun_filter *target_filter;
5552      char *target_addr;
5553  
5554      assert(ie->access == IOC_W);
5555  
5556      target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5557      if (!target_filter) {
5558          return -TARGET_EFAULT;
5559      }
5560      filter->flags = tswap16(target_filter->flags);
5561      filter->count = tswap16(target_filter->count);
5562      unlock_user(target_filter, arg, 0);
5563  
5564      if (filter->count) {
5565          if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5566              MAX_STRUCT_SIZE) {
5567              return -TARGET_EFAULT;
5568          }
5569  
5570          target_addr = lock_user(VERIFY_READ,
5571                                  arg + offsetof(struct tun_filter, addr),
5572                                  filter->count * ETH_ALEN, 1);
5573          if (!target_addr) {
5574              return -TARGET_EFAULT;
5575          }
5576          memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5577          unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5578      }
5579  
5580      return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5581  }
5582  
5583  IOCTLEntry ioctl_entries[] = {
5584  #define IOCTL(cmd, access, ...) \
5585      { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5586  #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5587      { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5588  #define IOCTL_IGNORE(cmd) \
5589      { TARGET_ ## cmd, 0, #cmd },
5590  #include "ioctls.h"
5591      { 0, 0, },
5592  };
5593  
5594  /* ??? Implement proper locking for ioctls.  */
5595  /* do_ioctl() Must return target values and target errnos. */
5596  static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5597  {
5598      const IOCTLEntry *ie;
5599      const argtype *arg_type;
5600      abi_long ret;
5601      uint8_t buf_temp[MAX_STRUCT_SIZE];
5602      int target_size;
5603      void *argptr;
5604  
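      /* Linear search of the ioctl table; the terminating entry has target_cmd == 0. */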
5605      ie = ioctl_entries;
5606      for(;;) {
5607          if (ie->target_cmd == 0) {
5608              qemu_log_mask(
5609                  LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5610              return -TARGET_ENOTTY;
5611          }
5612          if (ie->target_cmd == cmd)
5613              break;
5614          ie++;
5615      }
5616      arg_type = ie->arg_type;
5617      if (ie->do_ioctl) {
5618          return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5619      } else if (!ie->host_cmd) {
5620          /* Some architectures define BSD ioctls in their headers
5621             that are not implemented in Linux.  */
5622          return -TARGET_ENOTTY;
5623      }
5624  
5625      switch(arg_type[0]) {
5626      case TYPE_NULL:
5627          /* no argument */
5628          ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5629          break;
5630      case TYPE_PTRVOID:
5631      case TYPE_INT:
5632      case TYPE_LONG:
5633      case TYPE_ULONG:
5634          ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5635          break;
5636      case TYPE_PTR:
5637          arg_type++;
5638          target_size = thunk_type_size(arg_type, 0);
5639          switch(ie->access) {
5640          case IOC_R:
5641              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5642              if (!is_error(ret)) {
5643                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5644                  if (!argptr)
5645                      return -TARGET_EFAULT;
5646                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5647                  unlock_user(argptr, arg, target_size);
5648              }
5649              break;
5650          case IOC_W:
5651              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5652              if (!argptr)
5653                  return -TARGET_EFAULT;
5654              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5655              unlock_user(argptr, arg, 0);
5656              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5657              break;
5658          default:
5659          case IOC_RW:
5660              argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5661              if (!argptr)
5662                  return -TARGET_EFAULT;
5663              thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5664              unlock_user(argptr, arg, 0);
5665              ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5666              if (!is_error(ret)) {
5667                  argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5668                  if (!argptr)
5669                      return -TARGET_EFAULT;
5670                  thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5671                  unlock_user(argptr, arg, target_size);
5672              }
5673              break;
5674          }
5675          break;
5676      default:
5677          qemu_log_mask(LOG_UNIMP,
5678                        "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5679                        (long)cmd, arg_type[0]);
5680          ret = -TARGET_ENOTTY;
5681          break;
5682      }
5683      return ret;
5684  }
5685  
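/* Each entry maps (target_mask, target_bits) to (host_mask, host_bits) for the
 * termios flag conversion helpers below. */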
5686  static const bitmask_transtbl iflag_tbl[] = {
5687          { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5688          { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5689          { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5690          { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5691          { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5692          { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5693          { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5694          { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5695          { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5696          { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5697          { TARGET_IXON, TARGET_IXON, IXON, IXON },
5698          { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5699          { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5700          { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5701          { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5702  };
5703  
5704  static const bitmask_transtbl oflag_tbl[] = {
5705  	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5706  	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5707  	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5708  	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5709  	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5710  	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5711  	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5712  	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5713  	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5714  	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5715  	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5716  	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5717  	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5718  	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5719  	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5720  	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5721  	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5722  	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5723  	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5724  	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5725  	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5726  	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5727  	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5728  	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5729  };
5730  
5731  static const bitmask_transtbl cflag_tbl[] = {
5732  	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5733  	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5734  	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5735  	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5736  	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5737  	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5738  	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5739  	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5740  	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5741  	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5742  	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5743  	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5744  	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5745  	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5746  	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5747  	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5748  	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5749  	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5750  	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5751  	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5752  	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5753  	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5754  	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5755  	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5756  	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5757  	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5758  	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5759  	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5760  	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5761  	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5762  	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5763  };
5764  
5765  static const bitmask_transtbl lflag_tbl[] = {
5766    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5767    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5768    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5769    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5770    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5771    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5772    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5773    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5774    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5775    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5776    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5777    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5778    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5779    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5780    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5781    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5782  };
5783  
5784  static void target_to_host_termios (void *dst, const void *src)
5785  {
5786      struct host_termios *host = dst;
5787      const struct target_termios *target = src;
5788  
5789      host->c_iflag =
5790          target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5791      host->c_oflag =
5792          target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5793      host->c_cflag =
5794          target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5795      host->c_lflag =
5796          target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5797      host->c_line = target->c_line;
5798  
5799      memset(host->c_cc, 0, sizeof(host->c_cc));
5800      host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5801      host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5802      host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5803      host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5804      host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5805      host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5806      host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5807      host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5808      host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5809      host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5810      host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5811      host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5812      host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5813      host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5814      host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5815      host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5816      host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5817  }
5818  
5819  static void host_to_target_termios (void *dst, const void *src)
5820  {
5821      struct target_termios *target = dst;
5822      const struct host_termios *host = src;
5823  
5824      target->c_iflag =
5825          tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5826      target->c_oflag =
5827          tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5828      target->c_cflag =
5829          tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5830      target->c_lflag =
5831          tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5832      target->c_line = host->c_line;
5833  
5834      memset(target->c_cc, 0, sizeof(target->c_cc));
5835      target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5836      target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5837      target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5838      target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5839      target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5840      target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5841      target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5842      target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5843      target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5844      target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5845      target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5846      target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5847      target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5848      target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5849      target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5850      target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5851      target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5852  }
5853  
5854  static const StructEntry struct_termios_def = {
5855      .convert = { host_to_target_termios, target_to_host_termios },
5856      .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5857      .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5858      .print = print_termios,
5859  };
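
/*
 * For reference: struct_termios_def wires the two converters above into the
 * generic ioctl thunk layer.  A termios ioctl is described in the ioctl
 * table along the lines of (sketch; the actual entries live in ioctls.h):
 *
 *     IOCTL(TCGETS, IOC_R, MK_STRUCT(STRUCT_termios))
 *
 * so host_to_target_termios()/target_to_host_termios() run automatically
 * whenever a termios argument crosses the guest/host boundary, and .print
 * is only used for -strace output.
 */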
5860  
5861  /* If the host does not provide these bits, they may be safely discarded. */
5862  #ifndef MAP_SYNC
5863  #define MAP_SYNC 0
5864  #endif
5865  #ifndef MAP_UNINITIALIZED
5866  #define MAP_UNINITIALIZED 0
5867  #endif
5868  
5869  static const bitmask_transtbl mmap_flags_tbl[] = {
5870      { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5871      { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5872        MAP_ANONYMOUS, MAP_ANONYMOUS },
5873      { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5874        MAP_GROWSDOWN, MAP_GROWSDOWN },
5875      { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5876        MAP_DENYWRITE, MAP_DENYWRITE },
5877      { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5878        MAP_EXECUTABLE, MAP_EXECUTABLE },
5879      { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5880      { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5881        MAP_NORESERVE, MAP_NORESERVE },
5882      { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5883      /* MAP_STACK has been ignored by the kernel for quite some time.
5884         Recognize it for the target insofar as we do not want to pass
5885         it through to the host.  */
5886      { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5887      { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5888      { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5889      { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5890        MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5891      { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5892        MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5893  };
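
/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }.  target_to_host_bitmask() (defined earlier in this file)
 * behaves roughly like this sketch:
 *
 *     unsigned host = 0;
 *     for (each entry e in the table) {
 *         if ((target_flags & e.target_mask) == e.target_bits) {
 *             host |= e.host_bits;
 *         }
 *     }
 *     return host;
 *
 * so the TARGET_MAP_STACK entry above, whose host bits are 0, is matched
 * but contributes nothing, i.e. the flag is silently dropped.
 */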
5894  
5895  /*
5896   * Arrange for legacy / undefined architecture specific flags to be
5897   * ignored by mmap handling code.
5898   */
5899  #ifndef TARGET_MAP_32BIT
5900  #define TARGET_MAP_32BIT 0
5901  #endif
5902  #ifndef TARGET_MAP_HUGE_2MB
5903  #define TARGET_MAP_HUGE_2MB 0
5904  #endif
5905  #ifndef TARGET_MAP_HUGE_1GB
5906  #define TARGET_MAP_HUGE_1GB 0
5907  #endif
5908  
5909  static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5910                          int target_flags, int fd, off_t offset)
5911  {
5912      /*
5913       * The historical set of flags that all mmap types implicitly support.
5914       */
5915      enum {
5916          TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5917                                 | TARGET_MAP_PRIVATE
5918                                 | TARGET_MAP_FIXED
5919                                 | TARGET_MAP_ANONYMOUS
5920                                 | TARGET_MAP_DENYWRITE
5921                                 | TARGET_MAP_EXECUTABLE
5922                                 | TARGET_MAP_UNINITIALIZED
5923                                 | TARGET_MAP_GROWSDOWN
5924                                 | TARGET_MAP_LOCKED
5925                                 | TARGET_MAP_NORESERVE
5926                                 | TARGET_MAP_POPULATE
5927                                 | TARGET_MAP_NONBLOCK
5928                                 | TARGET_MAP_STACK
5929                                 | TARGET_MAP_HUGETLB
5930                                 | TARGET_MAP_32BIT
5931                                 | TARGET_MAP_HUGE_2MB
5932                                 | TARGET_MAP_HUGE_1GB
5933      };
5934      int host_flags;
5935  
5936      switch (target_flags & TARGET_MAP_TYPE) {
5937      case TARGET_MAP_PRIVATE:
5938          host_flags = MAP_PRIVATE;
5939          break;
5940      case TARGET_MAP_SHARED:
5941          host_flags = MAP_SHARED;
5942          break;
5943      case TARGET_MAP_SHARED_VALIDATE:
5944          /*
5945           * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5946           * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5947           */
5948          if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5949              return -TARGET_EOPNOTSUPP;
5950          }
5951          host_flags = MAP_SHARED_VALIDATE;
5952          if (target_flags & TARGET_MAP_SYNC) {
5953              host_flags |= MAP_SYNC;
5954          }
5955          break;
5956      default:
5957          return -TARGET_EINVAL;
5958      }
5959      host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5960  
5961      return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5962  }
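
/*
 * Worked example: a guest call such as
 *
 *     mmap(addr, len, prot, MAP_SHARED_VALIDATE | MAP_SYNC, fd, offset);
 *
 * takes the TARGET_MAP_SHARED_VALIDATE arm above.  Any flag outside
 * TARGET_LEGACY_MAP_MASK other than TARGET_MAP_SYNC makes the call fail
 * with -TARGET_EOPNOTSUPP, mirroring the kernel's validation, and
 * MAP_SYNC itself is only forwarded to the host in this mode.
 */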
5963  
5964  /*
5965   * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5966   *       TARGET_I386 is defined if TARGET_X86_64 is defined
5967   */
5968  #if defined(TARGET_I386)
5969  
5970  /* NOTE: there is really one LDT for all the threads */
5971  static uint8_t *ldt_table;
5972  
5973  static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5974  {
5975      int size;
5976      void *p;
5977  
5978      if (!ldt_table)
5979          return 0;
5980      size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5981      if (size > bytecount)
5982          size = bytecount;
5983      p = lock_user(VERIFY_WRITE, ptr, size, 0);
5984      if (!p)
5985          return -TARGET_EFAULT;
5986      /* ??? Should this be byteswapped?  */
5987      memcpy(p, ldt_table, size);
5988      unlock_user(p, ptr, size);
5989      return size;
5990  }
5991  
5992  /* XXX: add locking support */
5993  static abi_long write_ldt(CPUX86State *env,
5994                            abi_ulong ptr, unsigned long bytecount, int oldmode)
5995  {
5996      struct target_modify_ldt_ldt_s ldt_info;
5997      struct target_modify_ldt_ldt_s *target_ldt_info;
5998      int seg_32bit, contents, read_exec_only, limit_in_pages;
5999      int seg_not_present, useable, lm;
6000      uint32_t *lp, entry_1, entry_2;
6001  
6002      if (bytecount != sizeof(ldt_info))
6003          return -TARGET_EINVAL;
6004      if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6005          return -TARGET_EFAULT;
6006      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6007      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6008      ldt_info.limit = tswap32(target_ldt_info->limit);
6009      ldt_info.flags = tswap32(target_ldt_info->flags);
6010      unlock_user_struct(target_ldt_info, ptr, 0);
6011  
6012      if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6013          return -TARGET_EINVAL;
6014      seg_32bit = ldt_info.flags & 1;
6015      contents = (ldt_info.flags >> 1) & 3;
6016      read_exec_only = (ldt_info.flags >> 3) & 1;
6017      limit_in_pages = (ldt_info.flags >> 4) & 1;
6018      seg_not_present = (ldt_info.flags >> 5) & 1;
6019      useable = (ldt_info.flags >> 6) & 1;
6020  #ifdef TARGET_ABI32
6021      lm = 0;
6022  #else
6023      lm = (ldt_info.flags >> 7) & 1;
6024  #endif
6025      if (contents == 3) {
6026          if (oldmode)
6027              return -TARGET_EINVAL;
6028          if (seg_not_present == 0)
6029              return -TARGET_EINVAL;
6030      }
6031      /* allocate the LDT */
6032      if (!ldt_table) {
6033          env->ldt.base = target_mmap(0,
6034                                      TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6035                                      PROT_READ|PROT_WRITE,
6036                                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6037          if (env->ldt.base == -1)
6038              return -TARGET_ENOMEM;
6039          memset(g2h_untagged(env->ldt.base), 0,
6040                 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6041          env->ldt.limit = 0xffff;
6042          ldt_table = g2h_untagged(env->ldt.base);
6043      }
6044  
6045      /* NOTE: same code as Linux kernel */
6046      /* Allow LDTs to be cleared by the user. */
6047      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6048          if (oldmode ||
6049              (contents == 0		&&
6050               read_exec_only == 1	&&
6051               seg_32bit == 0		&&
6052               limit_in_pages == 0	&&
6053               seg_not_present == 1	&&
6054               useable == 0 )) {
6055              entry_1 = 0;
6056              entry_2 = 0;
6057              goto install;
6058          }
6059      }
6060  
6061      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6062          (ldt_info.limit & 0x0ffff);
6063      entry_2 = (ldt_info.base_addr & 0xff000000) |
6064          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6065          (ldt_info.limit & 0xf0000) |
6066          ((read_exec_only ^ 1) << 9) |
6067          (contents << 10) |
6068          ((seg_not_present ^ 1) << 15) |
6069          (seg_32bit << 22) |
6070          (limit_in_pages << 23) |
6071          (lm << 21) |
6072          0x7000;
6073      if (!oldmode)
6074          entry_2 |= (useable << 20);
6075  
6076      /* Install the new entry ...  */
6077  install:
6078      lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6079      lp[0] = tswap32(entry_1);
6080      lp[1] = tswap32(entry_2);
6081      return 0;
6082  }
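
/*
 * For reference, entry_1/entry_2 above follow the hardware descriptor
 * layout (low and high 32-bit words of an 8-byte segment descriptor):
 *
 *     entry_1 = base[15:0] << 16 | limit[15:0]
 *     entry_2 = base[31:24]                          (bits 24-31)
 *             | G, D/B, L, AVL flags                 (bits 20-23)
 *             | limit[19:16]                         (bits 16-19)
 *             | P, DPL, S, type; 0x7000 = DPL=3, S   (bits  8-15)
 *             | base[23:16]                          (bits  0-7)
 *
 * This mirrors the descriptor-building code used by the Linux kernel's
 * modify_ldt() implementation.
 */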
6083  
6084  /* specific and weird i386 syscalls */
6085  static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6086                                unsigned long bytecount)
6087  {
6088      abi_long ret;
6089  
6090      switch (func) {
6091      case 0:
6092          ret = read_ldt(ptr, bytecount);
6093          break;
6094      case 1:
6095          ret = write_ldt(env, ptr, bytecount, 1);
6096          break;
6097      case 0x11:
6098          ret = write_ldt(env, ptr, bytecount, 0);
6099          break;
6100      default:
6101          ret = -TARGET_ENOSYS;
6102          break;
6103      }
6104      return ret;
6105  }
6106  
6107  #if defined(TARGET_ABI32)
6108  abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6109  {
6110      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6111      struct target_modify_ldt_ldt_s ldt_info;
6112      struct target_modify_ldt_ldt_s *target_ldt_info;
6113      int seg_32bit, contents, read_exec_only, limit_in_pages;
6114      int seg_not_present, useable, lm;
6115      uint32_t *lp, entry_1, entry_2;
6116      int i;
6117  
6118      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6119      if (!target_ldt_info)
6120          return -TARGET_EFAULT;
6121      ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6122      ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6123      ldt_info.limit = tswap32(target_ldt_info->limit);
6124      ldt_info.flags = tswap32(target_ldt_info->flags);
6125      if (ldt_info.entry_number == -1) {
6126          for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6127              if (gdt_table[i] == 0) {
6128                  ldt_info.entry_number = i;
6129                  target_ldt_info->entry_number = tswap32(i);
6130                  break;
6131              }
6132          }
6133      }
6134      unlock_user_struct(target_ldt_info, ptr, 1);
6135  
6136      if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6137          ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6138             return -TARGET_EINVAL;
6139      seg_32bit = ldt_info.flags & 1;
6140      contents = (ldt_info.flags >> 1) & 3;
6141      read_exec_only = (ldt_info.flags >> 3) & 1;
6142      limit_in_pages = (ldt_info.flags >> 4) & 1;
6143      seg_not_present = (ldt_info.flags >> 5) & 1;
6144      useable = (ldt_info.flags >> 6) & 1;
6145  #ifdef TARGET_ABI32
6146      lm = 0;
6147  #else
6148      lm = (ldt_info.flags >> 7) & 1;
6149  #endif
6150  
6151      if (contents == 3) {
6152          if (seg_not_present == 0)
6153              return -TARGET_EINVAL;
6154      }
6155  
6156      /* NOTE: same code as Linux kernel */
6157      /* Allow LDTs to be cleared by the user. */
6158      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6159          if ((contents == 0             &&
6160               read_exec_only == 1       &&
6161               seg_32bit == 0            &&
6162               limit_in_pages == 0       &&
6163               seg_not_present == 1      &&
6164               useable == 0 )) {
6165              entry_1 = 0;
6166              entry_2 = 0;
6167              goto install;
6168          }
6169      }
6170  
6171      entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6172          (ldt_info.limit & 0x0ffff);
6173      entry_2 = (ldt_info.base_addr & 0xff000000) |
6174          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6175          (ldt_info.limit & 0xf0000) |
6176          ((read_exec_only ^ 1) << 9) |
6177          (contents << 10) |
6178          ((seg_not_present ^ 1) << 15) |
6179          (seg_32bit << 22) |
6180          (limit_in_pages << 23) |
6181          (useable << 20) |
6182          (lm << 21) |
6183          0x7000;
6184  
6185      /* Install the new entry ...  */
6186  install:
6187      lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6188      lp[0] = tswap32(entry_1);
6189      lp[1] = tswap32(entry_2);
6190      return 0;
6191  }
6192  
6193  static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6194  {
6195      struct target_modify_ldt_ldt_s *target_ldt_info;
6196      uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6197      uint32_t base_addr, limit, flags;
6198      int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6199      int seg_not_present, useable, lm;
6200      uint32_t *lp, entry_1, entry_2;
6201  
6202      lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6203      if (!target_ldt_info)
6204          return -TARGET_EFAULT;
6205      idx = tswap32(target_ldt_info->entry_number);
6206      if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6207          idx > TARGET_GDT_ENTRY_TLS_MAX) {
6208          unlock_user_struct(target_ldt_info, ptr, 1);
6209          return -TARGET_EINVAL;
6210      }
6211      lp = (uint32_t *)(gdt_table + idx);
6212      entry_1 = tswap32(lp[0]);
6213      entry_2 = tswap32(lp[1]);
6214  
6215      read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6216      contents = (entry_2 >> 10) & 3;
6217      seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6218      seg_32bit = (entry_2 >> 22) & 1;
6219      limit_in_pages = (entry_2 >> 23) & 1;
6220      useable = (entry_2 >> 20) & 1;
6221  #ifdef TARGET_ABI32
6222      lm = 0;
6223  #else
6224      lm = (entry_2 >> 21) & 1;
6225  #endif
6226      flags = (seg_32bit << 0) | (contents << 1) |
6227          (read_exec_only << 3) | (limit_in_pages << 4) |
6228          (seg_not_present << 5) | (useable << 6) | (lm << 7);
6229      limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6230      base_addr = (entry_1 >> 16) |
6231          (entry_2 & 0xff000000) |
6232          ((entry_2 & 0xff) << 16);
6233      target_ldt_info->base_addr = tswapal(base_addr);
6234      target_ldt_info->limit = tswap32(limit);
6235      target_ldt_info->flags = tswap32(flags);
6236      unlock_user_struct(target_ldt_info, ptr, 1);
6237      return 0;
6238  }
6239  
6240  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6241  {
6242      return -TARGET_ENOSYS;
6243  }
6244  #else
6245  abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6246  {
6247      abi_long ret = 0;
6248      abi_ulong val;
6249      int idx;
6250  
6251      switch(code) {
6252      case TARGET_ARCH_SET_GS:
6253      case TARGET_ARCH_SET_FS:
6254          if (code == TARGET_ARCH_SET_GS)
6255              idx = R_GS;
6256          else
6257              idx = R_FS;
6258          cpu_x86_load_seg(env, idx, 0);
6259          env->segs[idx].base = addr;
6260          break;
6261      case TARGET_ARCH_GET_GS:
6262      case TARGET_ARCH_GET_FS:
6263          if (code == TARGET_ARCH_GET_GS)
6264              idx = R_GS;
6265          else
6266              idx = R_FS;
6267          val = env->segs[idx].base;
6268          if (put_user(val, addr, abi_ulong))
6269              ret = -TARGET_EFAULT;
6270          break;
6271      default:
6272          ret = -TARGET_EINVAL;
6273          break;
6274      }
6275      return ret;
6276  }
6277  #endif /* defined(TARGET_ABI32) */
6278  #endif /* defined(TARGET_I386) */
6279  
6280  /*
6281   * These constants are generic.  Supply any that are missing from the host.
6282   */
6283  #ifndef PR_SET_NAME
6284  # define PR_SET_NAME    15
6285  # define PR_GET_NAME    16
6286  #endif
6287  #ifndef PR_SET_FP_MODE
6288  # define PR_SET_FP_MODE 45
6289  # define PR_GET_FP_MODE 46
6290  # define PR_FP_MODE_FR   (1 << 0)
6291  # define PR_FP_MODE_FRE  (1 << 1)
6292  #endif
6293  #ifndef PR_SVE_SET_VL
6294  # define PR_SVE_SET_VL  50
6295  # define PR_SVE_GET_VL  51
6296  # define PR_SVE_VL_LEN_MASK  0xffff
6297  # define PR_SVE_VL_INHERIT   (1 << 17)
6298  #endif
6299  #ifndef PR_PAC_RESET_KEYS
6300  # define PR_PAC_RESET_KEYS  54
6301  # define PR_PAC_APIAKEY   (1 << 0)
6302  # define PR_PAC_APIBKEY   (1 << 1)
6303  # define PR_PAC_APDAKEY   (1 << 2)
6304  # define PR_PAC_APDBKEY   (1 << 3)
6305  # define PR_PAC_APGAKEY   (1 << 4)
6306  #endif
6307  #ifndef PR_SET_TAGGED_ADDR_CTRL
6308  # define PR_SET_TAGGED_ADDR_CTRL 55
6309  # define PR_GET_TAGGED_ADDR_CTRL 56
6310  # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6311  #endif
6312  #ifndef PR_SET_IO_FLUSHER
6313  # define PR_SET_IO_FLUSHER 57
6314  # define PR_GET_IO_FLUSHER 58
6315  #endif
6316  #ifndef PR_SET_SYSCALL_USER_DISPATCH
6317  # define PR_SET_SYSCALL_USER_DISPATCH 59
6318  #endif
6319  #ifndef PR_SME_SET_VL
6320  # define PR_SME_SET_VL  63
6321  # define PR_SME_GET_VL  64
6322  # define PR_SME_VL_LEN_MASK  0xffff
6323  # define PR_SME_VL_INHERIT   (1 << 17)
6324  #endif
6325  
6326  #include "target_prctl.h"
6327  
6328  static abi_long do_prctl_inval0(CPUArchState *env)
6329  {
6330      return -TARGET_EINVAL;
6331  }
6332  
6333  static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6334  {
6335      return -TARGET_EINVAL;
6336  }
6337  
6338  #ifndef do_prctl_get_fp_mode
6339  #define do_prctl_get_fp_mode do_prctl_inval0
6340  #endif
6341  #ifndef do_prctl_set_fp_mode
6342  #define do_prctl_set_fp_mode do_prctl_inval1
6343  #endif
6344  #ifndef do_prctl_sve_get_vl
6345  #define do_prctl_sve_get_vl do_prctl_inval0
6346  #endif
6347  #ifndef do_prctl_sve_set_vl
6348  #define do_prctl_sve_set_vl do_prctl_inval1
6349  #endif
6350  #ifndef do_prctl_reset_keys
6351  #define do_prctl_reset_keys do_prctl_inval1
6352  #endif
6353  #ifndef do_prctl_set_tagged_addr_ctrl
6354  #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6355  #endif
6356  #ifndef do_prctl_get_tagged_addr_ctrl
6357  #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6358  #endif
6359  #ifndef do_prctl_get_unalign
6360  #define do_prctl_get_unalign do_prctl_inval1
6361  #endif
6362  #ifndef do_prctl_set_unalign
6363  #define do_prctl_set_unalign do_prctl_inval1
6364  #endif
6365  #ifndef do_prctl_sme_get_vl
6366  #define do_prctl_sme_get_vl do_prctl_inval0
6367  #endif
6368  #ifndef do_prctl_sme_set_vl
6369  #define do_prctl_sme_set_vl do_prctl_inval1
6370  #endif
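
/*
 * target_prctl.h (included above) may provide per-architecture handlers
 * for these options; anything it leaves undefined falls back to the
 * -TARGET_EINVAL stubs.  A hypothetical handler in target_prctl.h would
 * look roughly like:
 *
 *     static abi_long do_prctl_set_fp_mode(CPUArchState *env, abi_long arg)
 *     {
 *         // ...update CPU state from arg...
 *         return 0;
 *     }
 *     #define do_prctl_set_fp_mode do_prctl_set_fp_mode
 *
 * where the self-referential #define is what suppresses the fallback.
 */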
6371  
6372  static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6373                           abi_long arg3, abi_long arg4, abi_long arg5)
6374  {
6375      abi_long ret;
6376  
6377      switch (option) {
6378      case PR_GET_PDEATHSIG:
6379          {
6380              int deathsig;
6381              ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6382                                    arg3, arg4, arg5));
6383              if (!is_error(ret) &&
6384                  put_user_s32(host_to_target_signal(deathsig), arg2)) {
6385                  return -TARGET_EFAULT;
6386              }
6387              return ret;
6388          }
6389      case PR_SET_PDEATHSIG:
6390          return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6391                                 arg3, arg4, arg5));
6392      case PR_GET_NAME:
6393          {
6394              void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6395              if (!name) {
6396                  return -TARGET_EFAULT;
6397              }
6398              ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6399                                    arg3, arg4, arg5));
6400              unlock_user(name, arg2, 16);
6401              return ret;
6402          }
6403      case PR_SET_NAME:
6404          {
6405              void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6406              if (!name) {
6407                  return -TARGET_EFAULT;
6408              }
6409              ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6410                                    arg3, arg4, arg5));
6411              unlock_user(name, arg2, 0);
6412              return ret;
6413          }
6414      case PR_GET_FP_MODE:
6415          return do_prctl_get_fp_mode(env);
6416      case PR_SET_FP_MODE:
6417          return do_prctl_set_fp_mode(env, arg2);
6418      case PR_SVE_GET_VL:
6419          return do_prctl_sve_get_vl(env);
6420      case PR_SVE_SET_VL:
6421          return do_prctl_sve_set_vl(env, arg2);
6422      case PR_SME_GET_VL:
6423          return do_prctl_sme_get_vl(env);
6424      case PR_SME_SET_VL:
6425          return do_prctl_sme_set_vl(env, arg2);
6426      case PR_PAC_RESET_KEYS:
6427          if (arg3 || arg4 || arg5) {
6428              return -TARGET_EINVAL;
6429          }
6430          return do_prctl_reset_keys(env, arg2);
6431      case PR_SET_TAGGED_ADDR_CTRL:
6432          if (arg3 || arg4 || arg5) {
6433              return -TARGET_EINVAL;
6434          }
6435          return do_prctl_set_tagged_addr_ctrl(env, arg2);
6436      case PR_GET_TAGGED_ADDR_CTRL:
6437          if (arg2 || arg3 || arg4 || arg5) {
6438              return -TARGET_EINVAL;
6439          }
6440          return do_prctl_get_tagged_addr_ctrl(env);
6441  
6442      case PR_GET_UNALIGN:
6443          return do_prctl_get_unalign(env, arg2);
6444      case PR_SET_UNALIGN:
6445          return do_prctl_set_unalign(env, arg2);
6446  
6447      case PR_CAP_AMBIENT:
6448      case PR_CAPBSET_READ:
6449      case PR_CAPBSET_DROP:
6450      case PR_GET_DUMPABLE:
6451      case PR_SET_DUMPABLE:
6452      case PR_GET_KEEPCAPS:
6453      case PR_SET_KEEPCAPS:
6454      case PR_GET_SECUREBITS:
6455      case PR_SET_SECUREBITS:
6456      case PR_GET_TIMING:
6457      case PR_SET_TIMING:
6458      case PR_GET_TIMERSLACK:
6459      case PR_SET_TIMERSLACK:
6460      case PR_MCE_KILL:
6461      case PR_MCE_KILL_GET:
6462      case PR_GET_NO_NEW_PRIVS:
6463      case PR_SET_NO_NEW_PRIVS:
6464      case PR_GET_IO_FLUSHER:
6465      case PR_SET_IO_FLUSHER:
6466      case PR_SET_CHILD_SUBREAPER:
6467      case PR_GET_SPECULATION_CTRL:
6468      case PR_SET_SPECULATION_CTRL:
6469          /* Some prctl options take no pointer arguments and can be passed straight through. */
6470          return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6471  
6472      case PR_GET_CHILD_SUBREAPER:
6473          {
6474              int val;
6475              ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6476                                    arg3, arg4, arg5));
6477              if (!is_error(ret) && put_user_s32(val, arg2)) {
6478                  return -TARGET_EFAULT;
6479              }
6480              return ret;
6481          }
6482  
6483      case PR_GET_TID_ADDRESS:
6484          {
6485              TaskState *ts = get_task_state(env_cpu(env));
6486              return put_user_ual(ts->child_tidptr, arg2);
6487          }
6488  
6489      case PR_GET_FPEXC:
6490      case PR_SET_FPEXC:
6491          /* Was used for SPE on PowerPC. */
6492          return -TARGET_EINVAL;
6493  
6494      case PR_GET_ENDIAN:
6495      case PR_SET_ENDIAN:
6496      case PR_GET_FPEMU:
6497      case PR_SET_FPEMU:
6498      case PR_SET_MM:
6499      case PR_GET_SECCOMP:
6500      case PR_SET_SECCOMP:
6501      case PR_SET_SYSCALL_USER_DISPATCH:
6502      case PR_GET_THP_DISABLE:
6503      case PR_SET_THP_DISABLE:
6504      case PR_GET_TSC:
6505      case PR_SET_TSC:
6506          /* Reject these to prevent the guest from disabling features that QEMU itself relies on. */
6507          return -TARGET_EINVAL;
6508  
6509      default:
6510          qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6511                        option);
6512          return -TARGET_EINVAL;
6513      }
6514  }
6515  
6516  #define NEW_STACK_SIZE 0x40000
6517  
6518  
6519  static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6520  typedef struct {
6521      CPUArchState *env;
6522      pthread_mutex_t mutex;
6523      pthread_cond_t cond;
6524      pthread_t thread;
6525      uint32_t tid;
6526      abi_ulong child_tidptr;
6527      abi_ulong parent_tidptr;
6528      sigset_t sigmask;
6529  } new_thread_info;
6530  
6531  static void *clone_func(void *arg)
6532  {
6533      new_thread_info *info = arg;
6534      CPUArchState *env;
6535      CPUState *cpu;
6536      TaskState *ts;
6537  
6538      rcu_register_thread();
6539      tcg_register_thread();
6540      env = info->env;
6541      cpu = env_cpu(env);
6542      thread_cpu = cpu;
6543      ts = get_task_state(cpu);
6544      info->tid = sys_gettid();
6545      task_settid(ts);
6546      if (info->child_tidptr)
6547          put_user_u32(info->tid, info->child_tidptr);
6548      if (info->parent_tidptr)
6549          put_user_u32(info->tid, info->parent_tidptr);
6550      qemu_guest_random_seed_thread_part2(cpu->random_seed);
6551      /* Enable signals.  */
6552      sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6553      /* Signal to the parent that we're ready.  */
6554      pthread_mutex_lock(&info->mutex);
6555      pthread_cond_broadcast(&info->cond);
6556      pthread_mutex_unlock(&info->mutex);
6557      /* Wait until the parent has finished initializing the tls state.  */
6558      pthread_mutex_lock(&clone_lock);
6559      pthread_mutex_unlock(&clone_lock);
6560      cpu_loop(env);
6561      /* never exits */
6562      return NULL;
6563  }
6564  
6565  /* do_fork() must return host values and target errnos (unlike most
6566     do_*() functions). */
6567  static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6568                     abi_ulong parent_tidptr, target_ulong newtls,
6569                     abi_ulong child_tidptr)
6570  {
6571      CPUState *cpu = env_cpu(env);
6572      int ret;
6573      TaskState *ts;
6574      CPUState *new_cpu;
6575      CPUArchState *new_env;
6576      sigset_t sigmask;
6577  
6578      flags &= ~CLONE_IGNORED_FLAGS;
6579  
6580      /* Emulate vfork() with fork() */
6581      if (flags & CLONE_VFORK)
6582          flags &= ~(CLONE_VFORK | CLONE_VM);
6583  
6584      if (flags & CLONE_VM) {
6585          TaskState *parent_ts = get_task_state(cpu);
6586          new_thread_info info;
6587          pthread_attr_t attr;
6588  
6589          if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6590              (flags & CLONE_INVALID_THREAD_FLAGS)) {
6591              return -TARGET_EINVAL;
6592          }
6593  
6594          ts = g_new0(TaskState, 1);
6595          init_task_state(ts);
6596  
6597          /* Grab a mutex so that thread setup appears atomic.  */
6598          pthread_mutex_lock(&clone_lock);
6599  
6600          /*
6601           * If this is our first additional thread, we need to ensure we
6602           * generate code for parallel execution and flush old translations.
6603           * Do this now so that the copy gets CF_PARALLEL too.
6604           */
6605          if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6606              tcg_cflags_set(cpu, CF_PARALLEL);
6607              tb_flush(cpu);
6608          }
6609  
6610          /* we create a new CPU instance. */
6611          new_env = cpu_copy(env);
6612          /* Init regs that differ from the parent.  */
6613          cpu_clone_regs_child(new_env, newsp, flags);
6614          cpu_clone_regs_parent(env, flags);
6615          new_cpu = env_cpu(new_env);
6616          new_cpu->opaque = ts;
6617          ts->bprm = parent_ts->bprm;
6618          ts->info = parent_ts->info;
6619          ts->signal_mask = parent_ts->signal_mask;
6620  
6621          if (flags & CLONE_CHILD_CLEARTID) {
6622              ts->child_tidptr = child_tidptr;
6623          }
6624  
6625          if (flags & CLONE_SETTLS) {
6626              cpu_set_tls (new_env, newtls);
6627          }
6628  
6629          memset(&info, 0, sizeof(info));
6630          pthread_mutex_init(&info.mutex, NULL);
6631          pthread_mutex_lock(&info.mutex);
6632          pthread_cond_init(&info.cond, NULL);
6633          info.env = new_env;
6634          if (flags & CLONE_CHILD_SETTID) {
6635              info.child_tidptr = child_tidptr;
6636          }
6637          if (flags & CLONE_PARENT_SETTID) {
6638              info.parent_tidptr = parent_tidptr;
6639          }
6640  
6641          ret = pthread_attr_init(&attr);
6642          ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6643          ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6644          /* It is not safe to deliver signals until the child has finished
6645             initializing, so temporarily block all signals.  */
6646          sigfillset(&sigmask);
6647          sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6648          cpu->random_seed = qemu_guest_random_seed_thread_part1();
6649  
6650          ret = pthread_create(&info.thread, &attr, clone_func, &info);
6651          /* TODO: Free new CPU state if thread creation failed.  */
6652  
6653          sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6654          pthread_attr_destroy(&attr);
6655          if (ret == 0) {
6656              /* Wait for the child to initialize.  */
6657              pthread_cond_wait(&info.cond, &info.mutex);
6658              ret = info.tid;
6659          } else {
6660              ret = -1;
6661          }
6662          pthread_mutex_unlock(&info.mutex);
6663          pthread_cond_destroy(&info.cond);
6664          pthread_mutex_destroy(&info.mutex);
6665          pthread_mutex_unlock(&clone_lock);
6666      } else {
6667          /* if no CLONE_VM, we consider it is a fork */
6668          if (flags & CLONE_INVALID_FORK_FLAGS) {
6669              return -TARGET_EINVAL;
6670          }
6671  
6672          /* We can't support custom termination signals */
6673          if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6674              return -TARGET_EINVAL;
6675          }
6676  
6677  #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6678          if (flags & CLONE_PIDFD) {
6679              return -TARGET_EINVAL;
6680          }
6681  #endif
6682  
6683          /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID. */
6684          if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6685              return -TARGET_EINVAL;
6686          }
6687  
6688          if (block_signals()) {
6689              return -QEMU_ERESTARTSYS;
6690          }
6691  
6692          fork_start();
6693          ret = fork();
6694          if (ret == 0) {
6695              /* Child Process.  */
6696              cpu_clone_regs_child(env, newsp, flags);
6697              fork_end(ret);
6698              /* There is a race condition here.  The parent process could
6699                 theoretically read the TID in the child process before the child
6700                 tid is set.  This would require using either ptrace
6701                 (not implemented) or having *_tidptr to point at a shared memory
6702                 mapping.  We can't repeat the spinlock hack used above because
6703                 the child process gets its own copy of the lock.  */
6704              if (flags & CLONE_CHILD_SETTID)
6705                  put_user_u32(sys_gettid(), child_tidptr);
6706              if (flags & CLONE_PARENT_SETTID)
6707                  put_user_u32(sys_gettid(), parent_tidptr);
6708              ts = get_task_state(cpu);
6709              if (flags & CLONE_SETTLS)
6710                  cpu_set_tls (env, newtls);
6711              if (flags & CLONE_CHILD_CLEARTID)
6712                  ts->child_tidptr = child_tidptr;
6713          } else {
6714              cpu_clone_regs_parent(env, flags);
6715              if (flags & CLONE_PIDFD) {
6716                  int pid_fd = 0;
6717  #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6718                  int pid_child = ret;
6719                  pid_fd = pidfd_open(pid_child, 0);
6720                  if (pid_fd >= 0) {
6721                          fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6722                                                 | FD_CLOEXEC);
6723                  } else {
6724                          pid_fd = 0;
6725                  }
6726  #endif
6727                  put_user_u32(pid_fd, parent_tidptr);
6728              }
6729              fork_end(ret);
6730          }
6731          g_assert(!cpu_in_exclusive_context(cpu));
6732      }
6733      return ret;
6734  }
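
/*
 * Usage note: a guest pthread_create() typically reaches do_fork() via
 * clone() with CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 * CLONE_THREAD (plus the TID/TLS flags) and takes the host-pthread path
 * above, while plain fork()/vfork() arrive with CLONE_VM clear and take
 * the host fork() path.
 */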
6735  
6736  /* Warning: does not handle Linux-specific flags... */
6737  static int target_to_host_fcntl_cmd(int cmd)
6738  {
6739      int ret;
6740  
6741      switch(cmd) {
6742      case TARGET_F_DUPFD:
6743      case TARGET_F_GETFD:
6744      case TARGET_F_SETFD:
6745      case TARGET_F_GETFL:
6746      case TARGET_F_SETFL:
6747      case TARGET_F_OFD_GETLK:
6748      case TARGET_F_OFD_SETLK:
6749      case TARGET_F_OFD_SETLKW:
6750          ret = cmd;
6751          break;
6752      case TARGET_F_GETLK:
6753          ret = F_GETLK;
6754          break;
6755      case TARGET_F_SETLK:
6756          ret = F_SETLK;
6757          break;
6758      case TARGET_F_SETLKW:
6759          ret = F_SETLKW;
6760          break;
6761      case TARGET_F_GETOWN:
6762          ret = F_GETOWN;
6763          break;
6764      case TARGET_F_SETOWN:
6765          ret = F_SETOWN;
6766          break;
6767      case TARGET_F_GETSIG:
6768          ret = F_GETSIG;
6769          break;
6770      case TARGET_F_SETSIG:
6771          ret = F_SETSIG;
6772          break;
6773  #if TARGET_ABI_BITS == 32
6774      case TARGET_F_GETLK64:
6775          ret = F_GETLK;
6776          break;
6777      case TARGET_F_SETLK64:
6778          ret = F_SETLK;
6779          break;
6780      case TARGET_F_SETLKW64:
6781          ret = F_SETLKW;
6782          break;
6783  #endif
6784      case TARGET_F_SETLEASE:
6785          ret = F_SETLEASE;
6786          break;
6787      case TARGET_F_GETLEASE:
6788          ret = F_GETLEASE;
6789          break;
6790  #ifdef F_DUPFD_CLOEXEC
6791      case TARGET_F_DUPFD_CLOEXEC:
6792          ret = F_DUPFD_CLOEXEC;
6793          break;
6794  #endif
6795      case TARGET_F_NOTIFY:
6796          ret = F_NOTIFY;
6797          break;
6798  #ifdef F_GETOWN_EX
6799      case TARGET_F_GETOWN_EX:
6800          ret = F_GETOWN_EX;
6801          break;
6802  #endif
6803  #ifdef F_SETOWN_EX
6804      case TARGET_F_SETOWN_EX:
6805          ret = F_SETOWN_EX;
6806          break;
6807  #endif
6808  #ifdef F_SETPIPE_SZ
6809      case TARGET_F_SETPIPE_SZ:
6810          ret = F_SETPIPE_SZ;
6811          break;
6812      case TARGET_F_GETPIPE_SZ:
6813          ret = F_GETPIPE_SZ;
6814          break;
6815  #endif
6816  #ifdef F_ADD_SEALS
6817      case TARGET_F_ADD_SEALS:
6818          ret = F_ADD_SEALS;
6819          break;
6820      case TARGET_F_GET_SEALS:
6821          ret = F_GET_SEALS;
6822          break;
6823  #endif
6824      default:
6825          ret = -TARGET_EINVAL;
6826          break;
6827      }
6828  
6829  #if defined(__powerpc64__)
6830      /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, which are
6831       * not supported by the kernel. The glibc fcntl call actually adjusts
6832       * them to 5, 6 and 7 before making the syscall(). Since we make the
6833       * syscall directly, adjust to what the kernel supports.
6834       */
6835      if (ret >= F_GETLK && ret <= F_SETLKW) {
6836          ret -= F_GETLK - 5;
6837      }
6838  #endif
6839  
6840      return ret;
6841  }
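
/*
 * Example: for a 32-bit guest ABI, TARGET_F_GETLK64 is mapped to the
 * host's plain F_GETLK because the host "struct flock" already carries
 * 64-bit offsets; the guest's 64-bit record layout is handled separately
 * by copy_{from,to}_user_flock64() below.
 */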
6842  
6843  #define FLOCK_TRANSTBL \
6844      switch (type) { \
6845      TRANSTBL_CONVERT(F_RDLCK); \
6846      TRANSTBL_CONVERT(F_WRLCK); \
6847      TRANSTBL_CONVERT(F_UNLCK); \
6848      }
6849  
6850  static int target_to_host_flock(int type)
6851  {
6852  #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6853      FLOCK_TRANSTBL
6854  #undef  TRANSTBL_CONVERT
6855      return -TARGET_EINVAL;
6856  }
6857  
6858  static int host_to_target_flock(int type)
6859  {
6860  #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6861      FLOCK_TRANSTBL
6862  #undef  TRANSTBL_CONVERT
6863      /* If we don't know how to convert the value coming
6864       * from the host, copy it to the target field as-is.
6865       */
6866      return type;
6867  }
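
/*
 * For reference, TRANSTBL_CONVERT turns FLOCK_TRANSTBL into an ordinary
 * switch, so target_to_host_flock() expands to roughly:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *     return -TARGET_EINVAL;
 */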
6868  
6869  static inline abi_long copy_from_user_flock(struct flock *fl,
6870                                              abi_ulong target_flock_addr)
6871  {
6872      struct target_flock *target_fl;
6873      int l_type;
6874  
6875      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6876          return -TARGET_EFAULT;
6877      }
6878  
6879      __get_user(l_type, &target_fl->l_type);
6880      l_type = target_to_host_flock(l_type);
6881      if (l_type < 0) {
6882          return l_type;
6883      }
6884      fl->l_type = l_type;
6885      __get_user(fl->l_whence, &target_fl->l_whence);
6886      __get_user(fl->l_start, &target_fl->l_start);
6887      __get_user(fl->l_len, &target_fl->l_len);
6888      __get_user(fl->l_pid, &target_fl->l_pid);
6889      unlock_user_struct(target_fl, target_flock_addr, 0);
6890      return 0;
6891  }
6892  
6893  static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6894                                            const struct flock *fl)
6895  {
6896      struct target_flock *target_fl;
6897      short l_type;
6898  
6899      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6900          return -TARGET_EFAULT;
6901      }
6902  
6903      l_type = host_to_target_flock(fl->l_type);
6904      __put_user(l_type, &target_fl->l_type);
6905      __put_user(fl->l_whence, &target_fl->l_whence);
6906      __put_user(fl->l_start, &target_fl->l_start);
6907      __put_user(fl->l_len, &target_fl->l_len);
6908      __put_user(fl->l_pid, &target_fl->l_pid);
6909      unlock_user_struct(target_fl, target_flock_addr, 1);
6910      return 0;
6911  }
6912  
6913  typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6914  typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6915  
6916  #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6917  struct target_oabi_flock64 {
6918      abi_short l_type;
6919      abi_short l_whence;
6920      abi_llong l_start;
6921      abi_llong l_len;
6922      abi_int   l_pid;
6923  } QEMU_PACKED;
6924  
6925  static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6926                                                     abi_ulong target_flock_addr)
6927  {
6928      struct target_oabi_flock64 *target_fl;
6929      int l_type;
6930  
6931      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6932          return -TARGET_EFAULT;
6933      }
6934  
6935      __get_user(l_type, &target_fl->l_type);
6936      l_type = target_to_host_flock(l_type);
6937      if (l_type < 0) {
6938          return l_type;
6939      }
6940      fl->l_type = l_type;
6941      __get_user(fl->l_whence, &target_fl->l_whence);
6942      __get_user(fl->l_start, &target_fl->l_start);
6943      __get_user(fl->l_len, &target_fl->l_len);
6944      __get_user(fl->l_pid, &target_fl->l_pid);
6945      unlock_user_struct(target_fl, target_flock_addr, 0);
6946      return 0;
6947  }
6948  
6949  static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6950                                                   const struct flock *fl)
6951  {
6952      struct target_oabi_flock64 *target_fl;
6953      short l_type;
6954  
6955      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6956          return -TARGET_EFAULT;
6957      }
6958  
6959      l_type = host_to_target_flock(fl->l_type);
6960      __put_user(l_type, &target_fl->l_type);
6961      __put_user(fl->l_whence, &target_fl->l_whence);
6962      __put_user(fl->l_start, &target_fl->l_start);
6963      __put_user(fl->l_len, &target_fl->l_len);
6964      __put_user(fl->l_pid, &target_fl->l_pid);
6965      unlock_user_struct(target_fl, target_flock_addr, 1);
6966      return 0;
6967  }
6968  #endif
6969  
6970  static inline abi_long copy_from_user_flock64(struct flock *fl,
6971                                                abi_ulong target_flock_addr)
6972  {
6973      struct target_flock64 *target_fl;
6974      int l_type;
6975  
6976      if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6977          return -TARGET_EFAULT;
6978      }
6979  
6980      __get_user(l_type, &target_fl->l_type);
6981      l_type = target_to_host_flock(l_type);
6982      if (l_type < 0) {
6983          return l_type;
6984      }
6985      fl->l_type = l_type;
6986      __get_user(fl->l_whence, &target_fl->l_whence);
6987      __get_user(fl->l_start, &target_fl->l_start);
6988      __get_user(fl->l_len, &target_fl->l_len);
6989      __get_user(fl->l_pid, &target_fl->l_pid);
6990      unlock_user_struct(target_fl, target_flock_addr, 0);
6991      return 0;
6992  }
6993  
6994  static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6995                                              const struct flock *fl)
6996  {
6997      struct target_flock64 *target_fl;
6998      short l_type;
6999  
7000      if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7001          return -TARGET_EFAULT;
7002      }
7003  
7004      l_type = host_to_target_flock(fl->l_type);
7005      __put_user(l_type, &target_fl->l_type);
7006      __put_user(fl->l_whence, &target_fl->l_whence);
7007      __put_user(fl->l_start, &target_fl->l_start);
7008      __put_user(fl->l_len, &target_fl->l_len);
7009      __put_user(fl->l_pid, &target_fl->l_pid);
7010      unlock_user_struct(target_fl, target_flock_addr, 1);
7011      return 0;
7012  }
7013  
7014  static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7015  {
7016      struct flock fl;
7017  #ifdef F_GETOWN_EX
7018      struct f_owner_ex fox;
7019      struct target_f_owner_ex *target_fox;
7020  #endif
7021      abi_long ret;
7022      int host_cmd = target_to_host_fcntl_cmd(cmd);
7023  
7024      if (host_cmd == -TARGET_EINVAL)
7025  	    return host_cmd;
7026  
7027      switch(cmd) {
7028      case TARGET_F_GETLK:
7029          ret = copy_from_user_flock(&fl, arg);
7030          if (ret) {
7031              return ret;
7032          }
7033          ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7034          if (ret == 0) {
7035              ret = copy_to_user_flock(arg, &fl);
7036          }
7037          break;
7038  
7039      case TARGET_F_SETLK:
7040      case TARGET_F_SETLKW:
7041          ret = copy_from_user_flock(&fl, arg);
7042          if (ret) {
7043              return ret;
7044          }
7045          ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7046          break;
7047  
7048      case TARGET_F_GETLK64:
7049      case TARGET_F_OFD_GETLK:
7050          ret = copy_from_user_flock64(&fl, arg);
7051          if (ret) {
7052              return ret;
7053          }
7054          ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7055          if (ret == 0) {
7056              ret = copy_to_user_flock64(arg, &fl);
7057          }
7058          break;
7059      case TARGET_F_SETLK64:
7060      case TARGET_F_SETLKW64:
7061      case TARGET_F_OFD_SETLK:
7062      case TARGET_F_OFD_SETLKW:
7063          ret = copy_from_user_flock64(&fl, arg);
7064          if (ret) {
7065              return ret;
7066          }
7067          ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7068          break;
7069  
7070      case TARGET_F_GETFL:
7071          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7072          if (ret >= 0) {
7073              ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7074              /* Tell 32-bit guests that large-file support is in effect on 64-bit hosts: */
7075              if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7076                  ret |= TARGET_O_LARGEFILE;
7077              }
7078          }
7079          break;
7080  
7081      case TARGET_F_SETFL:
7082          ret = get_errno(safe_fcntl(fd, host_cmd,
7083                                     target_to_host_bitmask(arg,
7084                                                            fcntl_flags_tbl)));
7085          break;
7086  
7087  #ifdef F_GETOWN_EX
7088      case TARGET_F_GETOWN_EX:
7089          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7090          if (ret >= 0) {
7091              if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7092                  return -TARGET_EFAULT;
7093              target_fox->type = tswap32(fox.type);
7094              target_fox->pid = tswap32(fox.pid);
7095              unlock_user_struct(target_fox, arg, 1);
7096          }
7097          break;
7098  #endif
7099  
7100  #ifdef F_SETOWN_EX
7101      case TARGET_F_SETOWN_EX:
7102          if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7103              return -TARGET_EFAULT;
7104          fox.type = tswap32(target_fox->type);
7105          fox.pid = tswap32(target_fox->pid);
7106          unlock_user_struct(target_fox, arg, 0);
7107          ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7108          break;
7109  #endif
7110  
7111      case TARGET_F_SETSIG:
7112          ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7113          break;
7114  
7115      case TARGET_F_GETSIG:
7116          ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7117          break;
7118  
7119      case TARGET_F_SETOWN:
7120      case TARGET_F_GETOWN:
7121      case TARGET_F_SETLEASE:
7122      case TARGET_F_GETLEASE:
7123      case TARGET_F_SETPIPE_SZ:
7124      case TARGET_F_GETPIPE_SZ:
7125      case TARGET_F_ADD_SEALS:
7126      case TARGET_F_GET_SEALS:
7127          ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7128          break;
7129  
7130      default:
7131          ret = get_errno(safe_fcntl(fd, cmd, arg));
7132          break;
7133      }
7134      return ret;
7135  }
7136  
7137  #ifdef USE_UID16
7138  
7139  static inline int high2lowuid(int uid)
7140  {
7141      if (uid > 65535)
7142          return 65534;
7143      else
7144          return uid;
7145  }
7146  
7147  static inline int high2lowgid(int gid)
7148  {
7149      if (gid > 65535)
7150          return 65534;
7151      else
7152          return gid;
7153  }
7154  
7155  static inline int low2highuid(int uid)
7156  {
7157      if ((int16_t)uid == -1)
7158          return -1;
7159      else
7160          return uid;
7161  }
7162  
7163  static inline int low2highgid(int gid)
7164  {
7165      if ((int16_t)gid == -1)
7166          return -1;
7167      else
7168          return gid;
7169  }
7170  static inline int tswapid(int id)
7171  {
7172      return tswap16(id);
7173  }
7174  
7175  #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7176  
7177  #else /* !USE_UID16 */
7178  static inline int high2lowuid(int uid)
7179  {
7180      return uid;
7181  }
7182  static inline int high2lowgid(int gid)
7183  {
7184      return gid;
7185  }
7186  static inline int low2highuid(int uid)
7187  {
7188      return uid;
7189  }
7190  static inline int low2highgid(int gid)
7191  {
7192      return gid;
7193  }
7194  static inline int tswapid(int id)
7195  {
7196      return tswap32(id);
7197  }
7198  
7199  #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7200  
7201  #endif /* USE_UID16 */
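
/*
 * Example: with USE_UID16, a host uid of 100000 does not fit the guest's
 * 16-bit getuid16()/chown16() ABI, so high2lowuid() clamps it to 65534
 * (the traditional "overflow" uid), matching the kernel's behaviour for
 * the legacy 16-bit UID syscalls.
 */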
7202  
7203  /* We must do direct syscalls for setting UID/GID, because we want to
7204   * implement the Linux system call semantics of "change only for this thread",
7205   * not the libc/POSIX semantics of "change for all threads in process".
7206   * (See http://ewontfix.com/17/ for more details.)
7207   * We use the 32-bit version of the syscalls if present; if it is not
7208   * then either the host architecture supports 32-bit UIDs natively with
7209   * the standard syscall, or the 16-bit UID is the best we can do.
7210   */
7211  #ifdef __NR_setuid32
7212  #define __NR_sys_setuid __NR_setuid32
7213  #else
7214  #define __NR_sys_setuid __NR_setuid
7215  #endif
7216  #ifdef __NR_setgid32
7217  #define __NR_sys_setgid __NR_setgid32
7218  #else
7219  #define __NR_sys_setgid __NR_setgid
7220  #endif
7221  #ifdef __NR_setresuid32
7222  #define __NR_sys_setresuid __NR_setresuid32
7223  #else
7224  #define __NR_sys_setresuid __NR_setresuid
7225  #endif
7226  #ifdef __NR_setresgid32
7227  #define __NR_sys_setresgid __NR_setresgid32
7228  #else
7229  #define __NR_sys_setresgid __NR_setresgid
7230  #endif
7231  #ifdef __NR_setgroups32
7232  #define __NR_sys_setgroups __NR_setgroups32
7233  #else
7234  #define __NR_sys_setgroups __NR_setgroups
7235  #endif
7236  #ifdef __NR_sys_setreuid32
7237  #define __NR_sys_setreuid __NR_setreuid32
7238  #else
7239  #define __NR_sys_setreuid __NR_setreuid
7240  #endif
7241  #ifdef __NR_sys_setregid32
7242  #define __NR_sys_setregid __NR_setregid32
7243  #else
7244  #define __NR_sys_setregid __NR_setregid
7245  #endif
7246  
7247  _syscall1(int, sys_setuid, uid_t, uid)
7248  _syscall1(int, sys_setgid, gid_t, gid)
7249  _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7250  _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7251  _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7252  _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7253  _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
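
/*
 * The _syscallN() macros defined earlier in this file expand each of the
 * declarations above into a thin wrapper, roughly:
 *
 *     static int sys_setuid(uid_t uid)
 *     {
 *         return syscall(__NR_sys_setuid, uid);
 *     }
 *
 * which is what gives the per-thread credential semantics described in
 * the comment above, instead of the all-threads behaviour of the glibc
 * wrappers.
 */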
7254  
7255  void syscall_init(void)
7256  {
7257      IOCTLEntry *ie;
7258      const argtype *arg_type;
7259      int size;
7260  
7261      thunk_init(STRUCT_MAX);
7262  
7263  #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7264  #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7265  #include "syscall_types.h"
7266  #undef STRUCT
7267  #undef STRUCT_SPECIAL
7268  
7269      /* we patch the ioctl size if necessary. We rely on the fact that
7270         no ioctl has all the bits at '1' in the size field */
7271      ie = ioctl_entries;
7272      while (ie->target_cmd != 0) {
7273          if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7274              TARGET_IOC_SIZEMASK) {
7275              arg_type = ie->arg_type;
7276              if (arg_type[0] != TYPE_PTR) {
7277                  fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7278                          ie->target_cmd);
7279                  exit(1);
7280              }
7281              arg_type++;
7282              size = thunk_type_size(arg_type, 0);
7283              ie->target_cmd = (ie->target_cmd &
7284                                ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7285                  (size << TARGET_IOC_SIZESHIFT);
7286          }
7287  
7288          /* automatic consistency check if same arch */
7289  #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7290      (defined(__x86_64__) && defined(TARGET_X86_64))
7291          if (unlikely(ie->target_cmd != ie->host_cmd)) {
7292              fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7293                      ie->name, ie->target_cmd, ie->host_cmd);
7294          }
7295  #endif
7296          ie++;
7297      }
7298  }
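
/*
 * Worked example of the size patching above: an ioctl whose table entry
 * was written with an all-ones size field (because the target structure
 * size is not known when the table is built) gets that field replaced at
 * startup with thunk_type_size() of its argument.  For a pointed-to
 * structure of, say, 60 bytes the entry becomes
 * (60 << TARGET_IOC_SIZESHIFT) within ie->target_cmd, so it then matches
 * the guest's _IOR()/_IOW() encoding of the same request.
 */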
7299  
7300  #ifdef TARGET_NR_truncate64
7301  static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7302                                           abi_long arg2,
7303                                           abi_long arg3,
7304                                           abi_long arg4)
7305  {
7306      if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7307          arg2 = arg3;
7308          arg3 = arg4;
7309      }
7310      return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7311  }
7312  #endif
7313  
7314  #ifdef TARGET_NR_ftruncate64
7315  static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7316                                            abi_long arg2,
7317                                            abi_long arg3,
7318                                            abi_long arg4)
7319  {
7320      if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7321          arg2 = arg3;
7322          arg3 = arg4;
7323      }
7324      return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7325  }
7326  #endif
7327  
7328  #if defined(TARGET_NR_timer_settime) || \
7329      (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7330  static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7331                                                   abi_ulong target_addr)
7332  {
7333      if (target_to_host_timespec(&host_its->it_interval, target_addr +
7334                                  offsetof(struct target_itimerspec,
7335                                           it_interval)) ||
7336          target_to_host_timespec(&host_its->it_value, target_addr +
7337                                  offsetof(struct target_itimerspec,
7338                                           it_value))) {
7339          return -TARGET_EFAULT;
7340      }
7341  
7342      return 0;
7343  }
7344  #endif
7345  
7346  #if defined(TARGET_NR_timer_settime64) || \
7347      (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7348  static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7349                                                     abi_ulong target_addr)
7350  {
7351      if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7352                                    offsetof(struct target__kernel_itimerspec,
7353                                             it_interval)) ||
7354          target_to_host_timespec64(&host_its->it_value, target_addr +
7355                                    offsetof(struct target__kernel_itimerspec,
7356                                             it_value))) {
7357          return -TARGET_EFAULT;
7358      }
7359  
7360      return 0;
7361  }
7362  #endif
7363  
7364  #if ((defined(TARGET_NR_timerfd_gettime) || \
7365        defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7366        defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7367  static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7368                                                   struct itimerspec *host_its)
7369  {
7370      if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7371                                                         it_interval),
7372                                  &host_its->it_interval) ||
7373          host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7374                                                         it_value),
7375                                  &host_its->it_value)) {
7376          return -TARGET_EFAULT;
7377      }
7378      return 0;
7379  }
7380  #endif
7381  
7382  #if ((defined(TARGET_NR_timerfd_gettime64) || \
7383        defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7384        defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7385  static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7386                                                     struct itimerspec *host_its)
7387  {
7388      if (host_to_target_timespec64(target_addr +
7389                                    offsetof(struct target__kernel_itimerspec,
7390                                             it_interval),
7391                                    &host_its->it_interval) ||
7392          host_to_target_timespec64(target_addr +
7393                                    offsetof(struct target__kernel_itimerspec,
7394                                             it_value),
7395                                    &host_its->it_value)) {
7396          return -TARGET_EFAULT;
7397      }
7398      return 0;
7399  }
7400  #endif
7401  
7402  #if defined(TARGET_NR_adjtimex) || \
7403      (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7404  static inline abi_long target_to_host_timex(struct timex *host_tx,
7405                                              abi_long target_addr)
7406  {
7407      struct target_timex *target_tx;
7408  
7409      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7410          return -TARGET_EFAULT;
7411      }
7412  
7413      __get_user(host_tx->modes, &target_tx->modes);
7414      __get_user(host_tx->offset, &target_tx->offset);
7415      __get_user(host_tx->freq, &target_tx->freq);
7416      __get_user(host_tx->maxerror, &target_tx->maxerror);
7417      __get_user(host_tx->esterror, &target_tx->esterror);
7418      __get_user(host_tx->status, &target_tx->status);
7419      __get_user(host_tx->constant, &target_tx->constant);
7420      __get_user(host_tx->precision, &target_tx->precision);
7421      __get_user(host_tx->tolerance, &target_tx->tolerance);
7422      __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7423      __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7424      __get_user(host_tx->tick, &target_tx->tick);
7425      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7426      __get_user(host_tx->jitter, &target_tx->jitter);
7427      __get_user(host_tx->shift, &target_tx->shift);
7428      __get_user(host_tx->stabil, &target_tx->stabil);
7429      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7430      __get_user(host_tx->calcnt, &target_tx->calcnt);
7431      __get_user(host_tx->errcnt, &target_tx->errcnt);
7432      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7433      __get_user(host_tx->tai, &target_tx->tai);
7434  
7435      unlock_user_struct(target_tx, target_addr, 0);
7436      return 0;
7437  }
7438  
7439  static inline abi_long host_to_target_timex(abi_long target_addr,
7440                                              struct timex *host_tx)
7441  {
7442      struct target_timex *target_tx;
7443  
7444      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7445          return -TARGET_EFAULT;
7446      }
7447  
7448      __put_user(host_tx->modes, &target_tx->modes);
7449      __put_user(host_tx->offset, &target_tx->offset);
7450      __put_user(host_tx->freq, &target_tx->freq);
7451      __put_user(host_tx->maxerror, &target_tx->maxerror);
7452      __put_user(host_tx->esterror, &target_tx->esterror);
7453      __put_user(host_tx->status, &target_tx->status);
7454      __put_user(host_tx->constant, &target_tx->constant);
7455      __put_user(host_tx->precision, &target_tx->precision);
7456      __put_user(host_tx->tolerance, &target_tx->tolerance);
7457      __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7458      __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7459      __put_user(host_tx->tick, &target_tx->tick);
7460      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7461      __put_user(host_tx->jitter, &target_tx->jitter);
7462      __put_user(host_tx->shift, &target_tx->shift);
7463      __put_user(host_tx->stabil, &target_tx->stabil);
7464      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7465      __put_user(host_tx->calcnt, &target_tx->calcnt);
7466      __put_user(host_tx->errcnt, &target_tx->errcnt);
7467      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7468      __put_user(host_tx->tai, &target_tx->tai);
7469  
7470      unlock_user_struct(target_tx, target_addr, 1);
7471      return 0;
7472  }
7473  #endif
7474  
7475  
7476  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7477  static inline abi_long target_to_host_timex64(struct timex *host_tx,
7478                                                abi_long target_addr)
7479  {
7480      struct target__kernel_timex *target_tx;
7481  
7482      if (copy_from_user_timeval64(&host_tx->time, target_addr +
7483                                   offsetof(struct target__kernel_timex,
7484                                            time))) {
7485          return -TARGET_EFAULT;
7486      }
7487  
7488      if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7489          return -TARGET_EFAULT;
7490      }
7491  
7492      __get_user(host_tx->modes, &target_tx->modes);
7493      __get_user(host_tx->offset, &target_tx->offset);
7494      __get_user(host_tx->freq, &target_tx->freq);
7495      __get_user(host_tx->maxerror, &target_tx->maxerror);
7496      __get_user(host_tx->esterror, &target_tx->esterror);
7497      __get_user(host_tx->status, &target_tx->status);
7498      __get_user(host_tx->constant, &target_tx->constant);
7499      __get_user(host_tx->precision, &target_tx->precision);
7500      __get_user(host_tx->tolerance, &target_tx->tolerance);
7501      __get_user(host_tx->tick, &target_tx->tick);
7502      __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7503      __get_user(host_tx->jitter, &target_tx->jitter);
7504      __get_user(host_tx->shift, &target_tx->shift);
7505      __get_user(host_tx->stabil, &target_tx->stabil);
7506      __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7507      __get_user(host_tx->calcnt, &target_tx->calcnt);
7508      __get_user(host_tx->errcnt, &target_tx->errcnt);
7509      __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7510      __get_user(host_tx->tai, &target_tx->tai);
7511  
7512      unlock_user_struct(target_tx, target_addr, 0);
7513      return 0;
7514  }
7515  
7516  static inline abi_long host_to_target_timex64(abi_long target_addr,
7517                                                struct timex *host_tx)
7518  {
7519      struct target__kernel_timex *target_tx;
7520  
7521      if (copy_to_user_timeval64(target_addr +
7522                                 offsetof(struct target__kernel_timex, time),
7523                                 &host_tx->time)) {
7524          return -TARGET_EFAULT;
7525      }
7526  
7527      if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7528          return -TARGET_EFAULT;
7529      }
7530  
7531      __put_user(host_tx->modes, &target_tx->modes);
7532      __put_user(host_tx->offset, &target_tx->offset);
7533      __put_user(host_tx->freq, &target_tx->freq);
7534      __put_user(host_tx->maxerror, &target_tx->maxerror);
7535      __put_user(host_tx->esterror, &target_tx->esterror);
7536      __put_user(host_tx->status, &target_tx->status);
7537      __put_user(host_tx->constant, &target_tx->constant);
7538      __put_user(host_tx->precision, &target_tx->precision);
7539      __put_user(host_tx->tolerance, &target_tx->tolerance);
7540      __put_user(host_tx->tick, &target_tx->tick);
7541      __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7542      __put_user(host_tx->jitter, &target_tx->jitter);
7543      __put_user(host_tx->shift, &target_tx->shift);
7544      __put_user(host_tx->stabil, &target_tx->stabil);
7545      __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7546      __put_user(host_tx->calcnt, &target_tx->calcnt);
7547      __put_user(host_tx->errcnt, &target_tx->errcnt);
7548      __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7549      __put_user(host_tx->tai, &target_tx->tai);
7550  
7551      unlock_user_struct(target_tx, target_addr, 1);
7552      return 0;
7553  }
7554  #endif
7555  
7556  #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7557  #define sigev_notify_thread_id _sigev_un._tid
7558  #endif
7559  
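/*
 * Copy a target struct sigevent into host format: the signal number is
 * translated, the remaining fields are byte-swapped (see the note on the
 * sigval union below).
 */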
7560  static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7561                                                 abi_ulong target_addr)
7562  {
7563      struct target_sigevent *target_sevp;
7564  
7565      if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7566          return -TARGET_EFAULT;
7567      }
7568  
7569      /* This union is awkward on 64 bit systems because it has a 32 bit
7570       * integer and a pointer in it; we follow the conversion approach
7571       * used for handling sigval types in signal.c so the guest should get
7572       * the correct value back even if we did a 64 bit byteswap and it's
7573       * using the 32 bit integer.
7574       */
7575      host_sevp->sigev_value.sival_ptr =
7576          (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7577      host_sevp->sigev_signo =
7578          target_to_host_signal(tswap32(target_sevp->sigev_signo));
7579      host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7580      host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7581  
7582      unlock_user_struct(target_sevp, target_addr, 1);
7583      return 0;
7584  }
7585  
7586  #if defined(TARGET_NR_mlockall)
7587  static inline int target_to_host_mlockall_arg(int arg)
7588  {
7589      int result = 0;
7590  
7591      if (arg & TARGET_MCL_CURRENT) {
7592          result |= MCL_CURRENT;
7593      }
7594      if (arg & TARGET_MCL_FUTURE) {
7595          result |= MCL_FUTURE;
7596      }
7597  #ifdef MCL_ONFAULT
7598      if (arg & TARGET_MCL_ONFAULT) {
7599          result |= MCL_ONFAULT;
7600      }
7601  #endif
7602  
7603      return result;
7604  }
7605  #endif
7606  
7607  static inline int target_to_host_msync_arg(abi_long arg)
7608  {
7609      return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7610             ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7611             ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7612             (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7613  }
7614  
7615  #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7616       defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7617       defined(TARGET_NR_newfstatat))
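/*
 * Fill a target stat64 structure (or the ARM EABI variant) from a host
 * struct stat, zeroing any fields the host does not provide.
 */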
7618  static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7619                                               abi_ulong target_addr,
7620                                               struct stat *host_st)
7621  {
7622  #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7623      if (cpu_env->eabi) {
7624          struct target_eabi_stat64 *target_st;
7625  
7626          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7627              return -TARGET_EFAULT;
7628          memset(target_st, 0, sizeof(struct target_eabi_stat64));
7629          __put_user(host_st->st_dev, &target_st->st_dev);
7630          __put_user(host_st->st_ino, &target_st->st_ino);
7631  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7632          __put_user(host_st->st_ino, &target_st->__st_ino);
7633  #endif
7634          __put_user(host_st->st_mode, &target_st->st_mode);
7635          __put_user(host_st->st_nlink, &target_st->st_nlink);
7636          __put_user(host_st->st_uid, &target_st->st_uid);
7637          __put_user(host_st->st_gid, &target_st->st_gid);
7638          __put_user(host_st->st_rdev, &target_st->st_rdev);
7639          __put_user(host_st->st_size, &target_st->st_size);
7640          __put_user(host_st->st_blksize, &target_st->st_blksize);
7641          __put_user(host_st->st_blocks, &target_st->st_blocks);
7642          __put_user(host_st->st_atime, &target_st->target_st_atime);
7643          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7644          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7645  #ifdef HAVE_STRUCT_STAT_ST_ATIM
7646          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7647          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7648          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7649  #endif
7650          unlock_user_struct(target_st, target_addr, 1);
7651      } else
7652  #endif
7653      {
7654  #if defined(TARGET_HAS_STRUCT_STAT64)
7655          struct target_stat64 *target_st;
7656  #else
7657          struct target_stat *target_st;
7658  #endif
7659  
7660          if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7661              return -TARGET_EFAULT;
7662          memset(target_st, 0, sizeof(*target_st));
7663          __put_user(host_st->st_dev, &target_st->st_dev);
7664          __put_user(host_st->st_ino, &target_st->st_ino);
7665  #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7666          __put_user(host_st->st_ino, &target_st->__st_ino);
7667  #endif
7668          __put_user(host_st->st_mode, &target_st->st_mode);
7669          __put_user(host_st->st_nlink, &target_st->st_nlink);
7670          __put_user(host_st->st_uid, &target_st->st_uid);
7671          __put_user(host_st->st_gid, &target_st->st_gid);
7672          __put_user(host_st->st_rdev, &target_st->st_rdev);
7673          /* XXX: better use of kernel struct */
7674          __put_user(host_st->st_size, &target_st->st_size);
7675          __put_user(host_st->st_blksize, &target_st->st_blksize);
7676          __put_user(host_st->st_blocks, &target_st->st_blocks);
7677          __put_user(host_st->st_atime, &target_st->target_st_atime);
7678          __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7679          __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7680  #ifdef HAVE_STRUCT_STAT_ST_ATIM
7681          __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7682          __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7683          __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7684  #endif
7685          unlock_user_struct(target_st, target_addr, 1);
7686      }
7687  
7688      return 0;
7689  }
7690  #endif
7691  
7692  #if defined(TARGET_NR_statx) && defined(__NR_statx)
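/*
 * Copy a statx result (already stored in target_statx layout) out to guest
 * memory, field by field with byte-swapping.
 */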
7693  static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7694                                              abi_ulong target_addr)
7695  {
7696      struct target_statx *target_stx;
7697  
7698      if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7699          return -TARGET_EFAULT;
7700      }
7701      memset(target_stx, 0, sizeof(*target_stx));
7702  
7703      __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7704      __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7705      __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7706      __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7707      __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7708      __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7709      __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7710      __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7711      __put_user(host_stx->stx_size, &target_stx->stx_size);
7712      __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7713      __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7714      __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7715      __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7716      __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7717      __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7718      __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7719      __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7720      __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7721      __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7722      __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7723      __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7724      __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7725      __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7726  
7727      unlock_user_struct(target_stx, target_addr, 1);
7728  
7729      return 0;
7730  }
7731  #endif
7732  
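/*
 * Issue the raw host futex syscall: on a 32-bit host with a 64-bit
 * struct timespec use __NR_futex_time64, otherwise fall back to the
 * classic __NR_futex.
 */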
7733  static int do_sys_futex(int *uaddr, int op, int val,
7734                           const struct timespec *timeout, int *uaddr2,
7735                           int val3)
7736  {
7737  #if HOST_LONG_BITS == 64
7738  #if defined(__NR_futex)
7739      /* a 64-bit host always has a 64-bit time_t and defines no _time64 variant */
7740      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7741  
7742  #endif
7743  #else /* HOST_LONG_BITS == 64 */
7744  #if defined(__NR_futex_time64)
7745      if (sizeof(timeout->tv_sec) == 8) {
7746          /* _time64 syscall on a 32-bit host */
7747          return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7748      }
7749  #endif
7750  #if defined(__NR_futex)
7751      /* old syscall on a 32-bit host */
7752      return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7753  #endif
7754  #endif /* HOST_LONG_BITS == 64 */
7755      g_assert_not_reached();
7756  }
7757  
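/*
 * As do_sys_futex(), but issued through the safe_futex wrappers and with
 * errno folded into the returned value.
 */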
7758  static int do_safe_futex(int *uaddr, int op, int val,
7759                           const struct timespec *timeout, int *uaddr2,
7760                           int val3)
7761  {
7762  #if HOST_LONG_BITS == 64
7763  #if defined(__NR_futex)
7764      /* a 64-bit host always has a 64-bit time_t and defines no _time64 variant */
7765      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7766  #endif
7767  #else /* HOST_LONG_BITS == 64 */
7768  #if defined(__NR_futex_time64)
7769      if (sizeof(timeout->tv_sec) == 8) {
7770          /* _time64 syscall on a 32-bit host */
7771          return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7772                                             val3));
7773      }
7774  #endif
7775  #if defined(__NR_futex)
7776      /* old syscall on a 32-bit host */
7777      return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7778  #endif
7779  #endif /* HOST_LONG_BITS == 64 */
7780      return -TARGET_ENOSYS;
7781  }
7782  
7783  /* ??? Using host futex calls even when target atomic operations
7784     are not really atomic probably breaks things.  However, implementing
7785     futexes locally would make futexes shared between multiple processes
7786     tricky.  In that case they would probably be useless anyway, because
7787     guest atomic operations won't work either.  */
7788  #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7789  static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7790                      int op, int val, target_ulong timeout,
7791                      target_ulong uaddr2, int val3)
7792  {
7793      struct timespec ts, *pts = NULL;
7794      void *haddr2 = NULL;
7795      int base_op;
7796  
7797      /* We assume FUTEX_* constants are the same on both host and target. */
7798  #ifdef FUTEX_CMD_MASK
7799      base_op = op & FUTEX_CMD_MASK;
7800  #else
7801      base_op = op;
7802  #endif
7803      switch (base_op) {
7804      case FUTEX_WAIT:
7805      case FUTEX_WAIT_BITSET:
7806          val = tswap32(val);
7807          break;
7808      case FUTEX_WAIT_REQUEUE_PI:
7809          val = tswap32(val);
7810          haddr2 = g2h(cpu, uaddr2);
7811          break;
7812      case FUTEX_LOCK_PI:
7813      case FUTEX_LOCK_PI2:
7814          break;
7815      case FUTEX_WAKE:
7816      case FUTEX_WAKE_BITSET:
7817      case FUTEX_TRYLOCK_PI:
7818      case FUTEX_UNLOCK_PI:
7819          timeout = 0;
7820          break;
7821      case FUTEX_FD:
7822          val = target_to_host_signal(val);
7823          timeout = 0;
7824          break;
7825      case FUTEX_CMP_REQUEUE:
7826      case FUTEX_CMP_REQUEUE_PI:
7827          val3 = tswap32(val3);
7828          /* fall through */
7829      case FUTEX_REQUEUE:
7830      case FUTEX_WAKE_OP:
7831          /*
7832           * For these, the 4th argument is not TIMEOUT, but VAL2.
7833           * But the prototype of do_safe_futex takes a pointer, so
7834           * insert casts to satisfy the compiler.  We do not need
7835           * to tswap VAL2 since it's not compared to guest memory.
7836            */
7837          pts = (struct timespec *)(uintptr_t)timeout;
7838          timeout = 0;
7839          haddr2 = g2h(cpu, uaddr2);
7840          break;
7841      default:
7842          return -TARGET_ENOSYS;
7843      }
7844      if (timeout) {
7845          pts = &ts;
7846          if (time64
7847              ? target_to_host_timespec64(pts, timeout)
7848              : target_to_host_timespec(pts, timeout)) {
7849              return -TARGET_EFAULT;
7850          }
7851      }
7852      return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7853  }
7854  #endif
7855  
7856  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
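/*
 * Wrapper for name_to_handle_at(): read the guest's handle_bytes field,
 * call the host syscall with a scratch struct file_handle, then copy the
 * (opaque) handle and the mount ID back to guest memory.
 */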
7857  static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7858                                       abi_long handle, abi_long mount_id,
7859                                       abi_long flags)
7860  {
7861      struct file_handle *target_fh;
7862      struct file_handle *fh;
7863      int mid = 0;
7864      abi_long ret;
7865      char *name;
7866      unsigned int size, total_size;
7867  
7868      if (get_user_s32(size, handle)) {
7869          return -TARGET_EFAULT;
7870      }
7871  
7872      name = lock_user_string(pathname);
7873      if (!name) {
7874          return -TARGET_EFAULT;
7875      }
7876  
7877      total_size = sizeof(struct file_handle) + size;
7878      target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7879      if (!target_fh) {
7880          unlock_user(name, pathname, 0);
7881          return -TARGET_EFAULT;
7882      }
7883  
7884      fh = g_malloc0(total_size);
7885      fh->handle_bytes = size;
7886  
7887      ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7888      unlock_user(name, pathname, 0);
7889  
7890      /* man name_to_handle_at(2):
7891       * Other than the use of the handle_bytes field, the caller should treat
7892       * the file_handle structure as an opaque data type
7893       */
7894  
7895      memcpy(target_fh, fh, total_size);
7896      target_fh->handle_bytes = tswap32(fh->handle_bytes);
7897      target_fh->handle_type = tswap32(fh->handle_type);
7898      g_free(fh);
7899      unlock_user(target_fh, handle, total_size);
7900  
7901      if (put_user_s32(mid, mount_id)) {
7902          return -TARGET_EFAULT;
7903      }
7904  
7905      return ret;
7906  
7907  }
7908  #endif
7909  
7910  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
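/*
 * Wrapper for open_by_handle_at(): copy the opaque handle in from guest
 * memory, byte-swap its header fields, and translate the open flags.
 */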
7911  static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7912                                       abi_long flags)
7913  {
7914      struct file_handle *target_fh;
7915      struct file_handle *fh;
7916      unsigned int size, total_size;
7917      abi_long ret;
7918  
7919      if (get_user_s32(size, handle)) {
7920          return -TARGET_EFAULT;
7921      }
7922  
7923      total_size = sizeof(struct file_handle) + size;
7924      target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7925      if (!target_fh) {
7926          return -TARGET_EFAULT;
7927      }
7928  
7929      fh = g_memdup(target_fh, total_size);
7930      fh->handle_bytes = size;
7931      fh->handle_type = tswap32(target_fh->handle_type);
7932  
7933      ret = get_errno(open_by_handle_at(mount_fd, fh,
7934                      target_to_host_bitmask(flags, fcntl_flags_tbl)));
7935  
7936      g_free(fh);
7937  
7938      unlock_user(target_fh, handle, total_size);
7939  
7940      return ret;
7941  }
7942  #endif
7943  
7944  #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7945  
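/*
 * Wrapper for signalfd4(): convert the guest sigset and flags to host
 * values and register an fd translator for the returned descriptor.
 */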
7946  static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7947  {
7948      int host_flags;
7949      target_sigset_t *target_mask;
7950      sigset_t host_mask;
7951      abi_long ret;
7952  
7953      if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7954          return -TARGET_EINVAL;
7955      }
7956      if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7957          return -TARGET_EFAULT;
7958      }
7959  
7960      target_to_host_sigset(&host_mask, target_mask);
7961  
7962      host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7963  
7964      ret = get_errno(signalfd(fd, &host_mask, host_flags));
7965      if (ret >= 0) {
7966          fd_trans_register(ret, &target_signalfd_trans);
7967      }
7968  
7969      unlock_user_struct(target_mask, mask, 0);
7970  
7971      return ret;
7972  }
7973  #endif
7974  
7975  /* Map host to target signal numbers for the wait family of syscalls.
7976     Assume all other status bits are the same.  */
7977  int host_to_target_waitstatus(int status)
7978  {
7979      if (WIFSIGNALED(status)) {
7980          return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7981      }
7982      if (WIFSTOPPED(status)) {
7983          return (host_to_target_signal(WSTOPSIG(status)) << 8)
7984                 | (status & 0xff);
7985      }
7986      return status;
7987  }
7988  
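/* Synthesize /proc/self/cmdline for the guest from the saved argv strings. */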
7989  static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7990  {
7991      CPUState *cpu = env_cpu(cpu_env);
7992      struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7993      int i;
7994  
7995      for (i = 0; i < bprm->argc; i++) {
7996          size_t len = strlen(bprm->argv[i]) + 1;
7997  
7998          if (write(fd, bprm->argv[i], len) != len) {
7999              return -1;
8000          }
8001      }
8002  
8003      return 0;
8004  }
8005  
8006  struct open_self_maps_data {
8007      TaskState *ts;
8008      IntervalTreeRoot *host_maps;
8009      int fd;
8010      bool smaps;
8011  };
8012  
8013  /*
8014   * Subroutine to output one line of /proc/self/maps,
8015   * or one region of /proc/self/smaps.
8016   */
8017  
8018  #ifdef TARGET_HPPA
8019  # define test_stack(S, E, L)  (E == L)
8020  #else
8021  # define test_stack(S, E, L)  (S == L)
8022  #endif
8023  
8024  static void open_self_maps_4(const struct open_self_maps_data *d,
8025                               const MapInfo *mi, abi_ptr start,
8026                               abi_ptr end, unsigned flags)
8027  {
8028      const struct image_info *info = d->ts->info;
8029      const char *path = mi->path;
8030      uint64_t offset;
8031      int fd = d->fd;
8032      int count;
8033  
8034      if (test_stack(start, end, info->stack_limit)) {
8035          path = "[stack]";
8036      } else if (start == info->brk) {
8037          path = "[heap]";
8038      } else if (start == info->vdso) {
8039          path = "[vdso]";
8040  #ifdef TARGET_X86_64
8041      } else if (start == TARGET_VSYSCALL_PAGE) {
8042          path = "[vsyscall]";
8043  #endif
8044      }
8045  
8046      /* Except for the null device (MAP_ANON), adjust the offset for this fragment. */
8047      offset = mi->offset;
8048      if (mi->dev) {
8049          uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8050          offset += hstart - mi->itree.start;
8051      }
8052  
8053      count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8054                      " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8055                      start, end,
8056                      (flags & PAGE_READ) ? 'r' : '-',
8057                      (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8058                      (flags & PAGE_EXEC) ? 'x' : '-',
8059                      mi->is_priv ? 'p' : 's',
8060                      offset, major(mi->dev), minor(mi->dev),
8061                      (uint64_t)mi->inode);
8062      if (path) {
8063          dprintf(fd, "%*s%s\n", 73 - count, "", path);
8064      } else {
8065          dprintf(fd, "\n");
8066      }
8067  
8068      if (d->smaps) {
8069          unsigned long size = end - start;
8070          unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8071          unsigned long size_kb = size >> 10;
8072  
8073          dprintf(fd, "Size:                  %lu kB\n"
8074                  "KernelPageSize:        %lu kB\n"
8075                  "MMUPageSize:           %lu kB\n"
8076                  "Rss:                   0 kB\n"
8077                  "Pss:                   0 kB\n"
8078                  "Pss_Dirty:             0 kB\n"
8079                  "Shared_Clean:          0 kB\n"
8080                  "Shared_Dirty:          0 kB\n"
8081                  "Private_Clean:         0 kB\n"
8082                  "Private_Dirty:         0 kB\n"
8083                  "Referenced:            0 kB\n"
8084                  "Anonymous:             %lu kB\n"
8085                  "LazyFree:              0 kB\n"
8086                  "AnonHugePages:         0 kB\n"
8087                  "ShmemPmdMapped:        0 kB\n"
8088                  "FilePmdMapped:         0 kB\n"
8089                  "Shared_Hugetlb:        0 kB\n"
8090                  "Private_Hugetlb:       0 kB\n"
8091                  "Swap:                  0 kB\n"
8092                  "SwapPss:               0 kB\n"
8093                  "Locked:                0 kB\n"
8094                  "THPeligible:    0\n"
8095                  "VmFlags:%s%s%s%s%s%s%s%s\n",
8096                  size_kb, page_size_kb, page_size_kb,
8097                  (flags & PAGE_ANON ? size_kb : 0),
8098                  (flags & PAGE_READ) ? " rd" : "",
8099                  (flags & PAGE_WRITE_ORG) ? " wr" : "",
8100                  (flags & PAGE_EXEC) ? " ex" : "",
8101                  mi->is_priv ? "" : " sh",
8102                  (flags & PAGE_READ) ? " mr" : "",
8103                  (flags & PAGE_WRITE_ORG) ? " mw" : "",
8104                  (flags & PAGE_EXEC) ? " me" : "",
8105                  mi->is_priv ? "" : " ms");
8106      }
8107  }
8108  
8109  /*
8110   * Callback for walk_memory_regions, when read_self_maps() fails.
8111   * Proceed without the benefit of host /proc/self/maps cross-check.
8112   */
8113  static int open_self_maps_3(void *opaque, target_ulong guest_start,
8114                              target_ulong guest_end, unsigned long flags)
8115  {
8116      static const MapInfo mi = { .is_priv = true };
8117  
8118      open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8119      return 0;
8120  }
8121  
8122  /*
8123   * Callback for walk_memory_regions, when read_self_maps() succeeds.
8124   */
8125  static int open_self_maps_2(void *opaque, target_ulong guest_start,
8126                              target_ulong guest_end, unsigned long flags)
8127  {
8128      const struct open_self_maps_data *d = opaque;
8129      uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8130      uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8131  
8132  #ifdef TARGET_X86_64
8133      /*
8134       * Because of the extremely high position of the page within the guest
8135       * virtual address space, this is not backed by host memory at all.
8136       * Therefore the loop below would fail.  This is the only instance
8137       * of not having host backing memory.
8138       */
8139      if (guest_start == TARGET_VSYSCALL_PAGE) {
8140          return open_self_maps_3(opaque, guest_start, guest_end, flags);
8141      }
8142  #endif
8143  
8144      while (1) {
8145          IntervalTreeNode *n =
8146              interval_tree_iter_first(d->host_maps, host_start, host_start);
8147          MapInfo *mi = container_of(n, MapInfo, itree);
8148          uintptr_t this_hlast = MIN(host_last, n->last);
8149          target_ulong this_gend = h2g(this_hlast) + 1;
8150  
8151          open_self_maps_4(d, mi, guest_start, this_gend, flags);
8152  
8153          if (this_hlast == host_last) {
8154              return 0;
8155          }
8156          host_start = this_hlast + 1;
8157          guest_start = h2g(host_start);
8158      }
8159  }
8160  
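/*
 * Generate the contents of /proc/self/maps (or smaps) for the guest by
 * walking guest memory regions under the mmap lock, cross-checked against
 * the host's own /proc/self/maps when it can be read.
 */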
8161  static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8162  {
8163      struct open_self_maps_data d = {
8164          .ts = get_task_state(env_cpu(env)),
8165          .fd = fd,
8166          .smaps = smaps
8167      };
8168  
8169      mmap_lock();
8170      d.host_maps = read_self_maps();
8171      if (d.host_maps) {
8172          walk_memory_regions(&d, open_self_maps_2);
8173          free_self_maps(d.host_maps);
8174      } else {
8175          walk_memory_regions(&d, open_self_maps_3);
8176      }
8177      mmap_unlock();
8178      return 0;
8179  }
8180  
8181  static int open_self_maps(CPUArchState *cpu_env, int fd)
8182  {
8183      return open_self_maps_1(cpu_env, fd, false);
8184  }
8185  
8186  static int open_self_smaps(CPUArchState *cpu_env, int fd)
8187  {
8188      return open_self_maps_1(cpu_env, fd, true);
8189  }
8190  
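/*
 * Synthesize /proc/self/stat for the guest.  Only the fields guests commonly
 * inspect (pid, comm, state, ppid, num_threads, starttime and the start of
 * the stack) are filled in; the remaining fields are written as 0.
 */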
8191  static int open_self_stat(CPUArchState *cpu_env, int fd)
8192  {
8193      CPUState *cpu = env_cpu(cpu_env);
8194      TaskState *ts = get_task_state(cpu);
8195      g_autoptr(GString) buf = g_string_new(NULL);
8196      int i;
8197  
8198      for (i = 0; i < 44; i++) {
8199          if (i == 0) {
8200              /* pid */
8201              g_string_printf(buf, FMT_pid " ", getpid());
8202          } else if (i == 1) {
8203              /* app name */
8204              gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8205              bin = bin ? bin + 1 : ts->bprm->argv[0];
8206              g_string_printf(buf, "(%.15s) ", bin);
8207          } else if (i == 2) {
8208              /* task state */
8209              g_string_assign(buf, "R "); /* we are running right now */
8210          } else if (i == 3) {
8211              /* ppid */
8212              g_string_printf(buf, FMT_pid " ", getppid());
8213          } else if (i == 19) {
8214              /* num_threads */
8215              int cpus = 0;
8216              WITH_RCU_READ_LOCK_GUARD() {
8217                  CPUState *cpu_iter;
8218                  CPU_FOREACH(cpu_iter) {
8219                      cpus++;
8220                  }
8221              }
8222              g_string_printf(buf, "%d ", cpus);
8223          } else if (i == 21) {
8224              /* starttime */
8225              g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8226          } else if (i == 27) {
8227              /* stack bottom */
8228              g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8229          } else {
8230              /* the remaining fields are not emulated; report them as 0 */
8231              g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8232          }
8233  
8234          if (write(fd, buf->str, buf->len) != buf->len) {
8235              return -1;
8236          }
8237      }
8238  
8239      return 0;
8240  }
8241  
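/* Synthesize /proc/self/auxv by dumping the guest's saved auxiliary vector. */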
8242  static int open_self_auxv(CPUArchState *cpu_env, int fd)
8243  {
8244      CPUState *cpu = env_cpu(cpu_env);
8245      TaskState *ts = get_task_state(cpu);
8246      abi_ulong auxv = ts->info->saved_auxv;
8247      abi_ulong len = ts->info->auxv_len;
8248      char *ptr;
8249  
8250      /*
8251       * The auxiliary vector is stored on the target process stack.
8252       * Read the whole vector and copy it to the file.
8253       */
8254      ptr = lock_user(VERIFY_READ, auxv, len, 0);
8255      if (ptr != NULL) {
8256          while (len > 0) {
8257              ssize_t r;
8258              r = write(fd, ptr, len);
8259              if (r <= 0) {
8260                  break;
8261              }
8262              len -= r;
8263              ptr += r;
8264          }
8265          lseek(fd, 0, SEEK_SET);
8266          unlock_user(ptr, auxv, len);
8267      }
8268  
8269      return 0;
8270  }
8271  
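/*
 * Return 1 if FILENAME refers to ENTRY within this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" with the
 * current pid; return 0 otherwise.
 */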
8272  static int is_proc_myself(const char *filename, const char *entry)
8273  {
8274      if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8275          filename += strlen("/proc/");
8276          if (!strncmp(filename, "self/", strlen("self/"))) {
8277              filename += strlen("self/");
8278          } else if (*filename >= '1' && *filename <= '9') {
8279              char myself[80];
8280              snprintf(myself, sizeof(myself), "%d/", getpid());
8281              if (!strncmp(filename, myself, strlen(myself))) {
8282                  filename += strlen(myself);
8283              } else {
8284                  return 0;
8285              }
8286          } else {
8287              return 0;
8288          }
8289          if (!strcmp(filename, entry)) {
8290              return 1;
8291          }
8292      }
8293      return 0;
8294  }
8295  
8296  static void excp_dump_file(FILE *logfile, CPUArchState *env,
8297                        const char *fmt, int code)
8298  {
8299      if (logfile) {
8300          CPUState *cs = env_cpu(env);
8301  
8302          fprintf(logfile, fmt, code);
8303          fprintf(logfile, "Failing executable: %s\n", exec_path);
8304          cpu_dump_state(cs, logfile, 0);
8305          open_self_maps(env, fileno(logfile));
8306      }
8307  }
8308  
8309  void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8310  {
8311      /* dump to console */
8312      excp_dump_file(stderr, env, fmt, code);
8313  
8314      /* dump to log file */
8315      if (qemu_log_separate()) {
8316          FILE *logfile = qemu_log_trylock();
8317  
8318          excp_dump_file(logfile, env, fmt, code);
8319          qemu_log_unlock(logfile);
8320      }
8321  }
8322  
8323  #include "target_proc.h"
8324  
8325  #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8326      defined(HAVE_ARCH_PROC_CPUINFO) || \
8327      defined(HAVE_ARCH_PROC_HARDWARE)
8328  static int is_proc(const char *filename, const char *entry)
8329  {
8330      return strcmp(filename, entry) == 0;
8331  }
8332  #endif
8333  
8334  #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8335  static int open_net_route(CPUArchState *cpu_env, int fd)
8336  {
8337      FILE *fp;
8338      char *line = NULL;
8339      size_t len = 0;
8340      ssize_t read;
8341  
8342      fp = fopen("/proc/net/route", "r");
8343      if (fp == NULL) {
8344          return -1;
8345      }
8346  
8347      /* read header */
8348  
8349      read = getline(&line, &len, fp);
8350      dprintf(fd, "%s", line);
8351  
8352      /* read routes */
8353  
8354      while ((read = getline(&line, &len, fp)) != -1) {
8355          char iface[16];
8356          uint32_t dest, gw, mask;
8357          unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8358          int fields;
8359  
8360          fields = sscanf(line,
8361                          "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8362                          iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8363                          &mask, &mtu, &window, &irtt);
8364          if (fields != 11) {
8365              continue;
8366          }
8367          dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8368                  iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8369                  metric, tswap32(mask), mtu, window, irtt);
8370      }
8371  
8372      free(line);
8373      fclose(fp);
8374  
8375      return 0;
8376  }
8377  #endif
8378  
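/*
 * Intercept opens of /proc entries that must be emulated: "/proc/self/exe"
 * is redirected to the real guest executable, while maps, smaps, stat, auxv,
 * cmdline and some host-specific files are synthesized into a memfd (or an
 * unlinked temporary file if memfd_create() is unavailable) whose descriptor
 * is returned.  Returns -2 if the path needs no faking.
 */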
8379  static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8380                                const char *fname, int flags, mode_t mode,
8381                                int openat2_resolve, bool safe)
8382  {
8383      g_autofree char *proc_name = NULL;
8384      const char *pathname;
8385      struct fake_open {
8386          const char *filename;
8387          int (*fill)(CPUArchState *cpu_env, int fd);
8388          int (*cmp)(const char *s1, const char *s2);
8389      };
8390      const struct fake_open *fake_open;
8391      static const struct fake_open fakes[] = {
8392          { "maps", open_self_maps, is_proc_myself },
8393          { "smaps", open_self_smaps, is_proc_myself },
8394          { "stat", open_self_stat, is_proc_myself },
8395          { "auxv", open_self_auxv, is_proc_myself },
8396          { "cmdline", open_self_cmdline, is_proc_myself },
8397  #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8398          { "/proc/net/route", open_net_route, is_proc },
8399  #endif
8400  #if defined(HAVE_ARCH_PROC_CPUINFO)
8401          { "/proc/cpuinfo", open_cpuinfo, is_proc },
8402  #endif
8403  #if defined(HAVE_ARCH_PROC_HARDWARE)
8404          { "/proc/hardware", open_hardware, is_proc },
8405  #endif
8406          { NULL, NULL, NULL }
8407      };
8408  
8409      /* if this is a file from the /proc/ filesystem, expand it to its full path */
8410      proc_name = realpath(fname, NULL);
8411      if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8412          pathname = proc_name;
8413      } else {
8414          pathname = fname;
8415      }
8416  
8417      if (is_proc_myself(pathname, "exe")) {
8418          /* Honor openat2 resolve flags */
8419          if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8420              (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8421              errno = ELOOP;
8422              return -1;
8423          }
8424          if (safe) {
8425              return safe_openat(dirfd, exec_path, flags, mode);
8426          } else {
8427              return openat(dirfd, exec_path, flags, mode);
8428          }
8429      }
8430  
8431      for (fake_open = fakes; fake_open->filename; fake_open++) {
8432          if (fake_open->cmp(pathname, fake_open->filename)) {
8433              break;
8434          }
8435      }
8436  
8437      if (fake_open->filename) {
8438          const char *tmpdir;
8439          char filename[PATH_MAX];
8440          int fd, r;
8441  
8442          fd = memfd_create("qemu-open", 0);
8443          if (fd < 0) {
8444              if (errno != ENOSYS) {
8445                  return fd;
8446              }
8447              /* create temporary file to map stat to */
8448              tmpdir = getenv("TMPDIR");
8449              if (!tmpdir)
8450                  tmpdir = "/tmp";
8451              snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8452              fd = mkstemp(filename);
8453              if (fd < 0) {
8454                  return fd;
8455              }
8456              unlink(filename);
8457          }
8458  
8459          if ((r = fake_open->fill(cpu_env, fd))) {
8460              int e = errno;
8461              close(fd);
8462              errno = e;
8463              return r;
8464          }
8465          lseek(fd, 0, SEEK_SET);
8466  
8467          return fd;
8468      }
8469  
8470      return -2;
8471  }
8472  
8473  int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8474                      int flags, mode_t mode, bool safe)
8475  {
8476      int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8477      if (fd > -2) {
8478          return fd;
8479      }
8480  
8481      if (safe) {
8482          return safe_openat(dirfd, path(pathname), flags, mode);
8483      } else {
8484          return openat(dirfd, path(pathname), flags, mode);
8485      }
8486  }
8487  
8488  
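/*
 * Implement openat2(): copy the guest's struct open_how, rejecting sizes
 * smaller than the version-0 layout and logging unsupported larger layouts,
 * byte-swap its fields, and route through the same /proc faking logic as
 * plain openat().
 */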
8489  static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8490                        abi_ptr guest_pathname, abi_ptr guest_open_how,
8491                        abi_ulong guest_size)
8492  {
8493      struct open_how_ver0 how = {0};
8494      char *pathname;
8495      int ret;
8496  
8497      if (guest_size < sizeof(struct target_open_how_ver0)) {
8498          return -TARGET_EINVAL;
8499      }
8500      ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8501      if (ret) {
8502          if (ret == -TARGET_E2BIG) {
8503              qemu_log_mask(LOG_UNIMP,
8504                            "Unimplemented openat2 open_how size: "
8505                            TARGET_ABI_FMT_lu "\n", guest_size);
8506          }
8507          return ret;
8508      }
8509      pathname = lock_user_string(guest_pathname);
8510      if (!pathname) {
8511          return -TARGET_EFAULT;
8512      }
8513  
8514      how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8515      how.mode = tswap64(how.mode);
8516      how.resolve = tswap64(how.resolve);
8517      int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8518                                  how.resolve, true);
8519      if (fd > -2) {
8520          ret = get_errno(fd);
8521      } else {
8522          ret = get_errno(safe_openat2(dirfd, pathname, &how,
8523                                       sizeof(struct open_how_ver0)));
8524      }
8525  
8526      fd_trans_unregister(ret);
8527      unlock_user(pathname, guest_pathname, 0);
8528      return ret;
8529  }
8530  
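/*
 * readlink()/readlinkat() helper that special-cases "/proc/self/exe" so it
 * reports the emulated executable's path rather than the QEMU binary.
 */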
8531  ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8532  {
8533      ssize_t ret;
8534  
8535      if (!pathname || !buf) {
8536          errno = EFAULT;
8537          return -1;
8538      }
8539  
8540      if (!bufsiz) {
8541          /* Short circuit this for the magic exe check. */
8542          errno = EINVAL;
8543          return -1;
8544      }
8545  
8546      if (is_proc_myself((const char *)pathname, "exe")) {
8547          /*
8548           * Don't worry about sign mismatch as earlier mapping
8549           * logic would have thrown a bad address error.
8550           */
8551          ret = MIN(strlen(exec_path), bufsiz);
8552          /* We cannot NUL terminate the string. */
8553          memcpy(buf, exec_path, ret);
8554      } else {
8555          ret = readlink(path(pathname), buf, bufsiz);
8556      }
8557  
8558      return ret;
8559  }
8560  
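/*
 * Common implementation of execve() and execveat(): count and lock the guest
 * argv/envp string arrays, translate "/proc/self/exe", and issue the host
 * syscall through the safe_syscall wrapper (see the comment below).
 */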
8561  static int do_execv(CPUArchState *cpu_env, int dirfd,
8562                      abi_long pathname, abi_long guest_argp,
8563                      abi_long guest_envp, int flags, bool is_execveat)
8564  {
8565      int ret;
8566      char **argp, **envp;
8567      int argc, envc;
8568      abi_ulong gp;
8569      abi_ulong addr;
8570      char **q;
8571      void *p;
8572  
8573      argc = 0;
8574  
8575      for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8576          if (get_user_ual(addr, gp)) {
8577              return -TARGET_EFAULT;
8578          }
8579          if (!addr) {
8580              break;
8581          }
8582          argc++;
8583      }
8584      envc = 0;
8585      for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8586          if (get_user_ual(addr, gp)) {
8587              return -TARGET_EFAULT;
8588          }
8589          if (!addr) {
8590              break;
8591          }
8592          envc++;
8593      }
8594  
8595      argp = g_new0(char *, argc + 1);
8596      envp = g_new0(char *, envc + 1);
8597  
8598      for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8599          if (get_user_ual(addr, gp)) {
8600              goto execve_efault;
8601          }
8602          if (!addr) {
8603              break;
8604          }
8605          *q = lock_user_string(addr);
8606          if (!*q) {
8607              goto execve_efault;
8608          }
8609      }
8610      *q = NULL;
8611  
8612      for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8613          if (get_user_ual(addr, gp)) {
8614              goto execve_efault;
8615          }
8616          if (!addr) {
8617              break;
8618          }
8619          *q = lock_user_string(addr);
8620          if (!*q) {
8621              goto execve_efault;
8622          }
8623      }
8624      *q = NULL;
8625  
8626      /*
8627       * Although execve() is not an interruptible syscall it is
8628       * a special case where we must use the safe_syscall wrapper:
8629       * if we allow a signal to happen before we make the host
8630       * syscall then we will 'lose' it, because at the point of
8631       * execve the process leaves QEMU's control. So we use the
8632       * safe syscall wrapper to ensure that we either take the
8633       * signal as a guest signal, or else it does not happen
8634       * before the execve completes and makes it the other
8635       * program's problem.
8636       */
8637      p = lock_user_string(pathname);
8638      if (!p) {
8639          goto execve_efault;
8640      }
8641  
8642      const char *exe = p;
8643      if (is_proc_myself(p, "exe")) {
8644          exe = exec_path;
8645      }
8646      ret = is_execveat
8647          ? safe_execveat(dirfd, exe, argp, envp, flags)
8648          : safe_execve(exe, argp, envp);
8649      ret = get_errno(ret);
8650  
8651      unlock_user(p, pathname, 0);
8652  
8653      goto execve_end;
8654  
8655  execve_efault:
8656      ret = -TARGET_EFAULT;
8657  
8658  execve_end:
8659      for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8660          if (get_user_ual(addr, gp) || !addr) {
8661              break;
8662          }
8663          unlock_user(*q, addr, 0);
8664      }
8665      for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8666          if (get_user_ual(addr, gp) || !addr) {
8667              break;
8668          }
8669          unlock_user(*q, addr, 0);
8670      }
8671  
8672      g_free(argp);
8673      g_free(envp);
8674      return ret;
8675  }
8676  
8677  #define TIMER_MAGIC 0x0caf0000
8678  #define TIMER_MAGIC_MASK 0xffff0000
8679  
8680  /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8681  static target_timer_t get_timer_id(abi_long arg)
8682  {
8683      target_timer_t timerid = arg;
8684  
8685      if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8686          return -TARGET_EINVAL;
8687      }
8688  
8689      timerid &= 0xffff;
8690  
8691      if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8692          return -TARGET_EINVAL;
8693      }
8694  
8695      return timerid;
8696  }
8697  
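/*
 * Convert a guest CPU affinity mask (an array of abi_ulong) into a host mask
 * (an array of unsigned long), bit by bit, since the two word sizes may
 * differ.
 */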
8698  static int target_to_host_cpu_mask(unsigned long *host_mask,
8699                                     size_t host_size,
8700                                     abi_ulong target_addr,
8701                                     size_t target_size)
8702  {
8703      unsigned target_bits = sizeof(abi_ulong) * 8;
8704      unsigned host_bits = sizeof(*host_mask) * 8;
8705      abi_ulong *target_mask;
8706      unsigned i, j;
8707  
8708      assert(host_size >= target_size);
8709  
8710      target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8711      if (!target_mask) {
8712          return -TARGET_EFAULT;
8713      }
8714      memset(host_mask, 0, host_size);
8715  
8716      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8717          unsigned bit = i * target_bits;
8718          abi_ulong val;
8719  
8720          __get_user(val, &target_mask[i]);
8721          for (j = 0; j < target_bits; j++, bit++) {
8722              if (val & (1UL << j)) {
8723                  host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8724              }
8725          }
8726      }
8727  
8728      unlock_user(target_mask, target_addr, 0);
8729      return 0;
8730  }
8731  
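      /*
       * Copy a host CPU affinity mask back out to guest memory at
       * target_addr, repacking the bits into abi_ulong sized words.
       */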
8732  static int host_to_target_cpu_mask(const unsigned long *host_mask,
8733                                     size_t host_size,
8734                                     abi_ulong target_addr,
8735                                     size_t target_size)
8736  {
8737      unsigned target_bits = sizeof(abi_ulong) * 8;
8738      unsigned host_bits = sizeof(*host_mask) * 8;
8739      abi_ulong *target_mask;
8740      unsigned i, j;
8741  
8742      assert(host_size >= target_size);
8743  
8744      target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8745      if (!target_mask) {
8746          return -TARGET_EFAULT;
8747      }
8748  
8749      for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8750          unsigned bit = i * target_bits;
8751          abi_ulong val = 0;
8752  
8753          for (j = 0; j < target_bits; j++, bit++) {
8754              if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8755                  val |= 1UL << j;
8756              }
8757          }
8758          __put_user(val, &target_mask[i]);
8759      }
8760  
8761      unlock_user(target_mask, target_addr, target_size);
8762      return 0;
8763  }
8764  
8765  #ifdef TARGET_NR_getdents
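      /*
       * Emulate getdents for the guest: read host directory entries into a
       * bounce buffer and repack them one record at a time into the guest's
       * struct target_dirent layout.  If the guest buffer fills up before
       * the host one is drained, rewind the directory offset to the first
       * record not returned.
       */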
8766  static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8767  {
8768      g_autofree void *hdirp = NULL;
8769      void *tdirp;
8770      int hlen, hoff, toff;
8771      int hreclen, treclen;
8772      off_t prev_diroff = 0;
8773  
8774      hdirp = g_try_malloc(count);
8775      if (!hdirp) {
8776          return -TARGET_ENOMEM;
8777      }
8778  
8779  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8780      hlen = sys_getdents(dirfd, hdirp, count);
8781  #else
8782      hlen = sys_getdents64(dirfd, hdirp, count);
8783  #endif
8784  
8785      hlen = get_errno(hlen);
8786      if (is_error(hlen)) {
8787          return hlen;
8788      }
8789  
8790      tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8791      if (!tdirp) {
8792          return -TARGET_EFAULT;
8793      }
8794  
8795      for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8796  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8797          struct linux_dirent *hde = hdirp + hoff;
8798  #else
8799          struct linux_dirent64 *hde = hdirp + hoff;
8800  #endif
8801          struct target_dirent *tde = tdirp + toff;
8802          int namelen;
8803          uint8_t type;
8804  
8805          namelen = strlen(hde->d_name);
8806          hreclen = hde->d_reclen;
8807          treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8808          treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8809  
8810          if (toff + treclen > count) {
8811              /*
8812               * If the host struct is smaller than the target struct, or
8813               * requires less alignment and thus packs into less space,
8814               * then the host can return more entries than we can pass
8815               * on to the guest.
8816               */
8817              if (toff == 0) {
8818                  toff = -TARGET_EINVAL; /* result buffer is too small */
8819                  break;
8820              }
8821              /*
8822               * Return what we have, resetting the file pointer to the
8823               * location of the first record not returned.
8824               */
8825              lseek(dirfd, prev_diroff, SEEK_SET);
8826              break;
8827          }
8828  
8829          prev_diroff = hde->d_off;
8830          tde->d_ino = tswapal(hde->d_ino);
8831          tde->d_off = tswapal(hde->d_off);
8832          tde->d_reclen = tswap16(treclen);
8833          memcpy(tde->d_name, hde->d_name, namelen + 1);
8834  
8835          /*
8836           * The getdents type is in what was formerly a padding byte at the
8837           * end of the structure.
8838           */
8839  #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8840          type = *((uint8_t *)hde + hreclen - 1);
8841  #else
8842          type = hde->d_type;
8843  #endif
8844          *((uint8_t *)tde + treclen - 1) = type;
8845      }
8846  
8847      unlock_user(tdirp, arg2, toff);
8848      return toff;
8849  }
8850  #endif /* TARGET_NR_getdents */
8851  
8852  #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
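      /* As do_getdents above, but producing struct target_dirent64 records. */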
8853  static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8854  {
8855      g_autofree void *hdirp = NULL;
8856      void *tdirp;
8857      int hlen, hoff, toff;
8858      int hreclen, treclen;
8859      off_t prev_diroff = 0;
8860  
8861      hdirp = g_try_malloc(count);
8862      if (!hdirp) {
8863          return -TARGET_ENOMEM;
8864      }
8865  
8866      hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8867      if (is_error(hlen)) {
8868          return hlen;
8869      }
8870  
8871      tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8872      if (!tdirp) {
8873          return -TARGET_EFAULT;
8874      }
8875  
8876      for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8877          struct linux_dirent64 *hde = hdirp + hoff;
8878          struct target_dirent64 *tde = tdirp + toff;
8879          int namelen;
8880  
8881          namelen = strlen(hde->d_name) + 1;
8882          hreclen = hde->d_reclen;
8883          treclen = offsetof(struct target_dirent64, d_name) + namelen;
8884          treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8885  
8886          if (toff + treclen > count) {
8887              /*
8888               * If the host struct is smaller than the target struct, or
8889               * requires less alignment and thus packs into less space,
8890               * then the host can return more entries than we can pass
8891               * on to the guest.
8892               */
8893              if (toff == 0) {
8894                  toff = -TARGET_EINVAL; /* result buffer is too small */
8895                  break;
8896              }
8897              /*
8898               * Return what we have, resetting the file pointer to the
8899               * location of the first record not returned.
8900               */
8901              lseek(dirfd, prev_diroff, SEEK_SET);
8902              break;
8903          }
8904  
8905          prev_diroff = hde->d_off;
8906          tde->d_ino = tswap64(hde->d_ino);
8907          tde->d_off = tswap64(hde->d_off);
8908          tde->d_reclen = tswap16(treclen);
8909          tde->d_type = hde->d_type;
8910          memcpy(tde->d_name, hde->d_name, namelen);
8911      }
8912  
8913      unlock_user(tdirp, arg2, toff);
8914      return toff;
8915  }
8916  #endif /* TARGET_NR_getdents64 */
8917  
8918  #if defined(TARGET_NR_riscv_hwprobe)
8919  
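      /*
       * These key and bit definitions mirror the Linux riscv_hwprobe(2)
       * UAPI values.
       */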
8920  #define RISCV_HWPROBE_KEY_MVENDORID     0
8921  #define RISCV_HWPROBE_KEY_MARCHID       1
8922  #define RISCV_HWPROBE_KEY_MIMPID        2
8923  
8924  #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8925  #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8926  
8927  #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8928  #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8929  #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8930  #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8931  #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8932  #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8933  #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8934  #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8935  #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8936  #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8937  #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8938  #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8939  #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8940  #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8941  #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8942  #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8943  #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8944  #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8945  #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8946  #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8947  #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8948  #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8949  #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8950  #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8951  #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8952  #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8953  #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8954  #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8955  #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8956  #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8957  #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8958  #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8959  #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8960  #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8961  #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8962  #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8963  #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8964  
8965  #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8966  #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8967  #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8968  #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8969  #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8970  #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8971  #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8972  
8973  #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8974  
8975  struct riscv_hwprobe {
8976      abi_llong  key;
8977      abi_ullong value;
8978  };
8979  
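      /*
       * Fill each guest-supplied riscv_hwprobe key/value pair from the
       * emulated CPU's configuration; unrecognised keys have their key
       * field set to -1.
       */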
8980  static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8981                                      struct riscv_hwprobe *pair,
8982                                      size_t pair_count)
8983  {
8984      const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8985  
8986      for (; pair_count > 0; pair_count--, pair++) {
8987          abi_llong key;
8988          abi_ullong value;
8989          __put_user(0, &pair->value);
8990          __get_user(key, &pair->key);
8991          switch (key) {
8992          case RISCV_HWPROBE_KEY_MVENDORID:
8993              __put_user(cfg->mvendorid, &pair->value);
8994              break;
8995          case RISCV_HWPROBE_KEY_MARCHID:
8996              __put_user(cfg->marchid, &pair->value);
8997              break;
8998          case RISCV_HWPROBE_KEY_MIMPID:
8999              __put_user(cfg->mimpid, &pair->value);
9000              break;
9001          case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9002              value = riscv_has_ext(env, RVI) &&
9003                      riscv_has_ext(env, RVM) &&
9004                      riscv_has_ext(env, RVA) ?
9005                      RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9006              __put_user(value, &pair->value);
9007              break;
9008          case RISCV_HWPROBE_KEY_IMA_EXT_0:
9009              value = riscv_has_ext(env, RVF) &&
9010                      riscv_has_ext(env, RVD) ?
9011                      RISCV_HWPROBE_IMA_FD : 0;
9012              value |= riscv_has_ext(env, RVC) ?
9013                       RISCV_HWPROBE_IMA_C : 0;
9014              value |= riscv_has_ext(env, RVV) ?
9015                       RISCV_HWPROBE_IMA_V : 0;
9016              value |= cfg->ext_zba ?
9017                       RISCV_HWPROBE_EXT_ZBA : 0;
9018              value |= cfg->ext_zbb ?
9019                       RISCV_HWPROBE_EXT_ZBB : 0;
9020              value |= cfg->ext_zbs ?
9021                       RISCV_HWPROBE_EXT_ZBS : 0;
9022              value |= cfg->ext_zicboz ?
9023                       RISCV_HWPROBE_EXT_ZICBOZ : 0;
9024              value |= cfg->ext_zbc ?
9025                       RISCV_HWPROBE_EXT_ZBC : 0;
9026              value |= cfg->ext_zbkb ?
9027                       RISCV_HWPROBE_EXT_ZBKB : 0;
9028              value |= cfg->ext_zbkc ?
9029                       RISCV_HWPROBE_EXT_ZBKC : 0;
9030              value |= cfg->ext_zbkx ?
9031                       RISCV_HWPROBE_EXT_ZBKX : 0;
9032              value |= cfg->ext_zknd ?
9033                       RISCV_HWPROBE_EXT_ZKND : 0;
9034              value |= cfg->ext_zkne ?
9035                       RISCV_HWPROBE_EXT_ZKNE : 0;
9036              value |= cfg->ext_zknh ?
9037                       RISCV_HWPROBE_EXT_ZKNH : 0;
9038              value |= cfg->ext_zksed ?
9039                       RISCV_HWPROBE_EXT_ZKSED : 0;
9040              value |= cfg->ext_zksh ?
9041                       RISCV_HWPROBE_EXT_ZKSH : 0;
9042              value |= cfg->ext_zkt ?
9043                       RISCV_HWPROBE_EXT_ZKT : 0;
9044              value |= cfg->ext_zvbb ?
9045                       RISCV_HWPROBE_EXT_ZVBB : 0;
9046              value |= cfg->ext_zvbc ?
9047                       RISCV_HWPROBE_EXT_ZVBC : 0;
9048              value |= cfg->ext_zvkb ?
9049                       RISCV_HWPROBE_EXT_ZVKB : 0;
9050              value |= cfg->ext_zvkg ?
9051                       RISCV_HWPROBE_EXT_ZVKG : 0;
9052              value |= cfg->ext_zvkned ?
9053                       RISCV_HWPROBE_EXT_ZVKNED : 0;
9054              value |= cfg->ext_zvknha ?
9055                       RISCV_HWPROBE_EXT_ZVKNHA : 0;
9056              value |= cfg->ext_zvknhb ?
9057                       RISCV_HWPROBE_EXT_ZVKNHB : 0;
9058              value |= cfg->ext_zvksed ?
9059                       RISCV_HWPROBE_EXT_ZVKSED : 0;
9060              value |= cfg->ext_zvksh ?
9061                       RISCV_HWPROBE_EXT_ZVKSH : 0;
9062              value |= cfg->ext_zvkt ?
9063                       RISCV_HWPROBE_EXT_ZVKT : 0;
9064              value |= cfg->ext_zfh ?
9065                       RISCV_HWPROBE_EXT_ZFH : 0;
9066              value |= cfg->ext_zfhmin ?
9067                       RISCV_HWPROBE_EXT_ZFHMIN : 0;
9068              value |= cfg->ext_zihintntl ?
9069                       RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9070              value |= cfg->ext_zvfh ?
9071                       RISCV_HWPROBE_EXT_ZVFH : 0;
9072              value |= cfg->ext_zvfhmin ?
9073                       RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9074              value |= cfg->ext_zfa ?
9075                       RISCV_HWPROBE_EXT_ZFA : 0;
9076              value |= cfg->ext_ztso ?
9077                       RISCV_HWPROBE_EXT_ZTSO : 0;
9078              value |= cfg->ext_zacas ?
9079                       RISCV_HWPROBE_EXT_ZACAS : 0;
9080              value |= cfg->ext_zicond ?
9081                       RISCV_HWPROBE_EXT_ZICOND : 0;
9082              __put_user(value, &pair->value);
9083              break;
9084          case RISCV_HWPROBE_KEY_CPUPERF_0:
9085              __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9086              break;
9087          case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9088              value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9089              __put_user(value, &pair->value);
9090              break;
9091          default:
9092              __put_user(-1, &pair->key);
9093              break;
9094          }
9095      }
9096  }
9097  
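      /*
       * Check the cpuset argument of riscv_hwprobe: return 0 if at least one
       * CPU bit is set in the guest mask, -TARGET_EINVAL if the mask is
       * empty, or -TARGET_EFAULT if it cannot be read.
       */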
9098  static int cpu_set_valid(abi_long arg3, abi_long arg4)
9099  {
9100      int ret, i, tmp;
9101      size_t host_mask_size, target_mask_size;
9102      unsigned long *host_mask;
9103  
9104      /*
9105       * cpu_set_t represents CPU masks as arrays of unsigned long bit masks.
9106       * arg3 contains the CPU count.
9107       */
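          /*
           * For example, a 32-bit guest asking about 40 CPUs needs two
           * abi_ulong words (8 bytes) here, which host_mask_size then rounds
           * up to a whole number of host unsigned longs.
           */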
9108      tmp = (8 * sizeof(abi_ulong));
9109      target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9110      host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9111                       ~(sizeof(*host_mask) - 1);
9112  
9113      host_mask = alloca(host_mask_size);
9114  
9115      ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9116                                    arg4, target_mask_size);
9117      if (ret != 0) {
9118          return ret;
9119      }
9120  
9121      for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9122          if (host_mask[i] != 0) {
9123              return 0;
9124          }
9125      }
9126      return -TARGET_EINVAL;
9127  }
9128  
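      /*
       * Implement the riscv_hwprobe syscall: check that flags are zero and
       * that any cpuset supplied is usable, then fill the guest's array of
       * key/value pairs in place.
       */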
9129  static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9130                                   abi_long arg2, abi_long arg3,
9131                                   abi_long arg4, abi_long arg5)
9132  {
9133      int ret;
9134      struct riscv_hwprobe *host_pairs;
9135  
9136      /* flags must be 0 */
9137      if (arg5 != 0) {
9138          return -TARGET_EINVAL;
9139      }
9140  
9141      /* check cpu_set */
9142      if (arg3 != 0) {
9143          ret = cpu_set_valid(arg3, arg4);
9144          if (ret != 0) {
9145              return ret;
9146          }
9147      } else if (arg4 != 0) {
9148          return -TARGET_EINVAL;
9149      }
9150  
9151      /* no pairs */
9152      if (arg2 == 0) {
9153          return 0;
9154      }
9155  
9156      host_pairs = lock_user(VERIFY_WRITE, arg1,
9157                             sizeof(*host_pairs) * (size_t)arg2, 0);
9158      if (host_pairs == NULL) {
9159          return -TARGET_EFAULT;
9160      }
9161      risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9162      unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9163      return 0;
9164  }
9165  #endif /* TARGET_NR_riscv_hwprobe */
9166  
9167  #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9168  _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9169  #endif
9170  
9171  #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9172  #define __NR_sys_open_tree __NR_open_tree
9173  _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9174            unsigned int, __flags)
9175  #endif
9176  
9177  #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9178  #define __NR_sys_move_mount __NR_move_mount
9179  _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9180             int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9181  #endif
9182  
9183  /* This is an internal helper for do_syscall that provides a single
9184   * return point, so that actions such as logging of syscall results can
9185   * be performed in one place.
9186   * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9187   */
9188  static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9189                              abi_long arg2, abi_long arg3, abi_long arg4,
9190                              abi_long arg5, abi_long arg6, abi_long arg7,
9191                              abi_long arg8)
9192  {
9193      CPUState *cpu = env_cpu(cpu_env);
9194      abi_long ret;
9195  #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9196      || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9197      || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9198      || defined(TARGET_NR_statx)
9199      struct stat st;
9200  #endif
9201  #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9202      || defined(TARGET_NR_fstatfs)
9203      struct statfs stfs;
9204  #endif
9205      void *p;
9206  
9207      switch(num) {
9208      case TARGET_NR_exit:
9209          /* In old applications this may be used to implement _exit(2).
9210             However, in threaded applications it is used for thread termination,
9211             and _exit_group is used for application termination.
9212             Do thread termination if we have more than one thread.  */
9213  
9214          if (block_signals()) {
9215              return -QEMU_ERESTARTSYS;
9216          }
9217  
9218          pthread_mutex_lock(&clone_lock);
9219  
9220          if (CPU_NEXT(first_cpu)) {
9221              TaskState *ts = get_task_state(cpu);
9222  
9223              if (ts->child_tidptr) {
9224                  put_user_u32(0, ts->child_tidptr);
9225                  do_sys_futex(g2h(cpu, ts->child_tidptr),
9226                               FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9227              }
9228  
9229              object_unparent(OBJECT(cpu));
9230              object_unref(OBJECT(cpu));
9231              /*
9232               * At this point the CPU should be unrealized and removed
9233               * from cpu lists. We can clean-up the rest of the thread
9234               * data without the lock held.
9235               */
9236  
9237              pthread_mutex_unlock(&clone_lock);
9238  
9239              thread_cpu = NULL;
9240              g_free(ts);
9241              rcu_unregister_thread();
9242              pthread_exit(NULL);
9243          }
9244  
9245          pthread_mutex_unlock(&clone_lock);
9246          preexit_cleanup(cpu_env, arg1);
9247          _exit(arg1);
9248          return 0; /* avoid warning */
9249      case TARGET_NR_read:
9250          if (arg2 == 0 && arg3 == 0) {
9251              return get_errno(safe_read(arg1, 0, 0));
9252          } else {
9253              if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9254                  return -TARGET_EFAULT;
9255              ret = get_errno(safe_read(arg1, p, arg3));
9256              if (ret >= 0 &&
9257                  fd_trans_host_to_target_data(arg1)) {
9258                  ret = fd_trans_host_to_target_data(arg1)(p, ret);
9259              }
9260              unlock_user(p, arg2, ret);
9261          }
9262          return ret;
9263      case TARGET_NR_write:
9264          if (arg2 == 0 && arg3 == 0) {
9265              return get_errno(safe_write(arg1, 0, 0));
9266          }
9267          if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9268              return -TARGET_EFAULT;
9269          if (fd_trans_target_to_host_data(arg1)) {
9270              void *copy = g_malloc(arg3);
9271              memcpy(copy, p, arg3);
9272              ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9273              if (ret >= 0) {
9274                  ret = get_errno(safe_write(arg1, copy, ret));
9275              }
9276              g_free(copy);
9277          } else {
9278              ret = get_errno(safe_write(arg1, p, arg3));
9279          }
9280          unlock_user(p, arg2, 0);
9281          return ret;
9282  
9283  #ifdef TARGET_NR_open
9284      case TARGET_NR_open:
9285          if (!(p = lock_user_string(arg1)))
9286              return -TARGET_EFAULT;
9287          ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9288                                    target_to_host_bitmask(arg2, fcntl_flags_tbl),
9289                                    arg3, true));
9290          fd_trans_unregister(ret);
9291          unlock_user(p, arg1, 0);
9292          return ret;
9293  #endif
9294      case TARGET_NR_openat:
9295          if (!(p = lock_user_string(arg2)))
9296              return -TARGET_EFAULT;
9297          ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9298                                    target_to_host_bitmask(arg3, fcntl_flags_tbl),
9299                                    arg4, true));
9300          fd_trans_unregister(ret);
9301          unlock_user(p, arg2, 0);
9302          return ret;
9303      case TARGET_NR_openat2:
9304          ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9305          return ret;
9306  #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9307      case TARGET_NR_name_to_handle_at:
9308          ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9309          return ret;
9310  #endif
9311  #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9312      case TARGET_NR_open_by_handle_at:
9313          ret = do_open_by_handle_at(arg1, arg2, arg3);
9314          fd_trans_unregister(ret);
9315          return ret;
9316  #endif
9317  #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9318      case TARGET_NR_pidfd_open:
9319          return get_errno(pidfd_open(arg1, arg2));
9320  #endif
9321  #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9322      case TARGET_NR_pidfd_send_signal:
9323          {
9324              siginfo_t uinfo, *puinfo;
9325  
9326              if (arg3) {
9327                  p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9328                  if (!p) {
9329                      return -TARGET_EFAULT;
9330                  }
9331                  target_to_host_siginfo(&uinfo, p);
9332                  unlock_user(p, arg3, 0);
9333                  puinfo = &uinfo;
9334              } else {
9335                  puinfo = NULL;
9336              }
9337              ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9338                                                puinfo, arg4));
9339          }
9340          return ret;
9341  #endif
9342  #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9343      case TARGET_NR_pidfd_getfd:
9344          return get_errno(pidfd_getfd(arg1, arg2, arg3));
9345  #endif
9346      case TARGET_NR_close:
9347          fd_trans_unregister(arg1);
9348          return get_errno(close(arg1));
9349  #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9350      case TARGET_NR_close_range:
9351          ret = get_errno(sys_close_range(arg1, arg2, arg3));
9352          if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9353              abi_long fd, maxfd;
9354              maxfd = MIN(arg2, target_fd_max);
9355              for (fd = arg1; fd < maxfd; fd++) {
9356                  fd_trans_unregister(fd);
9357              }
9358          }
9359          return ret;
9360  #endif
9361  
9362      case TARGET_NR_brk:
9363          return do_brk(arg1);
9364  #ifdef TARGET_NR_fork
9365      case TARGET_NR_fork:
9366          return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9367  #endif
9368  #ifdef TARGET_NR_waitpid
9369      case TARGET_NR_waitpid:
9370          {
9371              int status;
9372              ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9373              if (!is_error(ret) && arg2 && ret
9374                  && put_user_s32(host_to_target_waitstatus(status), arg2))
9375                  return -TARGET_EFAULT;
9376          }
9377          return ret;
9378  #endif
9379  #ifdef TARGET_NR_waitid
9380      case TARGET_NR_waitid:
9381          {
9382              struct rusage ru;
9383              siginfo_t info;
9384  
9385              ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9386                                          arg4, (arg5 ? &ru : NULL)));
9387              if (!is_error(ret)) {
9388                  if (arg3) {
9389                      p = lock_user(VERIFY_WRITE, arg3,
9390                                    sizeof(target_siginfo_t), 0);
9391                      if (!p) {
9392                          return -TARGET_EFAULT;
9393                      }
9394                      host_to_target_siginfo(p, &info);
9395                      unlock_user(p, arg3, sizeof(target_siginfo_t));
9396                  }
9397                  if (arg5 && host_to_target_rusage(arg5, &ru)) {
9398                      return -TARGET_EFAULT;
9399                  }
9400              }
9401          }
9402          return ret;
9403  #endif
9404  #ifdef TARGET_NR_creat /* not on alpha */
9405      case TARGET_NR_creat:
9406          if (!(p = lock_user_string(arg1)))
9407              return -TARGET_EFAULT;
9408          ret = get_errno(creat(p, arg2));
9409          fd_trans_unregister(ret);
9410          unlock_user(p, arg1, 0);
9411          return ret;
9412  #endif
9413  #ifdef TARGET_NR_link
9414      case TARGET_NR_link:
9415          {
9416              void * p2;
9417              p = lock_user_string(arg1);
9418              p2 = lock_user_string(arg2);
9419              if (!p || !p2)
9420                  ret = -TARGET_EFAULT;
9421              else
9422                  ret = get_errno(link(p, p2));
9423              unlock_user(p2, arg2, 0);
9424              unlock_user(p, arg1, 0);
9425          }
9426          return ret;
9427  #endif
9428  #if defined(TARGET_NR_linkat)
9429      case TARGET_NR_linkat:
9430          {
9431              void * p2 = NULL;
9432              if (!arg2 || !arg4)
9433                  return -TARGET_EFAULT;
9434              p  = lock_user_string(arg2);
9435              p2 = lock_user_string(arg4);
9436              if (!p || !p2)
9437                  ret = -TARGET_EFAULT;
9438              else
9439                  ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9440              unlock_user(p, arg2, 0);
9441              unlock_user(p2, arg4, 0);
9442          }
9443          return ret;
9444  #endif
9445  #ifdef TARGET_NR_unlink
9446      case TARGET_NR_unlink:
9447          if (!(p = lock_user_string(arg1)))
9448              return -TARGET_EFAULT;
9449          ret = get_errno(unlink(p));
9450          unlock_user(p, arg1, 0);
9451          return ret;
9452  #endif
9453  #if defined(TARGET_NR_unlinkat)
9454      case TARGET_NR_unlinkat:
9455          if (!(p = lock_user_string(arg2)))
9456              return -TARGET_EFAULT;
9457          ret = get_errno(unlinkat(arg1, p, arg3));
9458          unlock_user(p, arg2, 0);
9459          return ret;
9460  #endif
9461      case TARGET_NR_execveat:
9462          return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9463      case TARGET_NR_execve:
9464          return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9465      case TARGET_NR_chdir:
9466          if (!(p = lock_user_string(arg1)))
9467              return -TARGET_EFAULT;
9468          ret = get_errno(chdir(p));
9469          unlock_user(p, arg1, 0);
9470          return ret;
9471  #ifdef TARGET_NR_time
9472      case TARGET_NR_time:
9473          {
9474              time_t host_time;
9475              ret = get_errno(time(&host_time));
9476              if (!is_error(ret)
9477                  && arg1
9478                  && put_user_sal(host_time, arg1))
9479                  return -TARGET_EFAULT;
9480          }
9481          return ret;
9482  #endif
9483  #ifdef TARGET_NR_mknod
9484      case TARGET_NR_mknod:
9485          if (!(p = lock_user_string(arg1)))
9486              return -TARGET_EFAULT;
9487          ret = get_errno(mknod(p, arg2, arg3));
9488          unlock_user(p, arg1, 0);
9489          return ret;
9490  #endif
9491  #if defined(TARGET_NR_mknodat)
9492      case TARGET_NR_mknodat:
9493          if (!(p = lock_user_string(arg2)))
9494              return -TARGET_EFAULT;
9495          ret = get_errno(mknodat(arg1, p, arg3, arg4));
9496          unlock_user(p, arg2, 0);
9497          return ret;
9498  #endif
9499  #ifdef TARGET_NR_chmod
9500      case TARGET_NR_chmod:
9501          if (!(p = lock_user_string(arg1)))
9502              return -TARGET_EFAULT;
9503          ret = get_errno(chmod(p, arg2));
9504          unlock_user(p, arg1, 0);
9505          return ret;
9506  #endif
9507  #ifdef TARGET_NR_lseek
9508      case TARGET_NR_lseek:
9509          return get_errno(lseek(arg1, arg2, arg3));
9510  #endif
9511  #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9512      /* Alpha specific */
9513      case TARGET_NR_getxpid:
9514          cpu_env->ir[IR_A4] = getppid();
9515          return get_errno(getpid());
9516  #endif
9517  #ifdef TARGET_NR_getpid
9518      case TARGET_NR_getpid:
9519          return get_errno(getpid());
9520  #endif
9521      case TARGET_NR_mount:
9522          {
9523              /* need to look at the data field */
9524              void *p2, *p3;
9525  
9526              if (arg1) {
9527                  p = lock_user_string(arg1);
9528                  if (!p) {
9529                      return -TARGET_EFAULT;
9530                  }
9531              } else {
9532                  p = NULL;
9533              }
9534  
9535              p2 = lock_user_string(arg2);
9536              if (!p2) {
9537                  if (arg1) {
9538                      unlock_user(p, arg1, 0);
9539                  }
9540                  return -TARGET_EFAULT;
9541              }
9542  
9543              if (arg3) {
9544                  p3 = lock_user_string(arg3);
9545                  if (!p3) {
9546                      if (arg1) {
9547                          unlock_user(p, arg1, 0);
9548                      }
9549                      unlock_user(p2, arg2, 0);
9550                      return -TARGET_EFAULT;
9551                  }
9552              } else {
9553                  p3 = NULL;
9554              }
9555  
9556              /* FIXME - arg5 should be locked, but it isn't clear how to
9557               * do that since it's not guaranteed to be a NULL-terminated
9558               * string.
9559               */
9560              if (!arg5) {
9561                  ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9562              } else {
9563                  ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9564              }
9565              ret = get_errno(ret);
9566  
9567              if (arg1) {
9568                  unlock_user(p, arg1, 0);
9569              }
9570              unlock_user(p2, arg2, 0);
9571              if (arg3) {
9572                  unlock_user(p3, arg3, 0);
9573              }
9574          }
9575          return ret;
9576  #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9577  #if defined(TARGET_NR_umount)
9578      case TARGET_NR_umount:
9579  #endif
9580  #if defined(TARGET_NR_oldumount)
9581      case TARGET_NR_oldumount:
9582  #endif
9583          if (!(p = lock_user_string(arg1)))
9584              return -TARGET_EFAULT;
9585          ret = get_errno(umount(p));
9586          unlock_user(p, arg1, 0);
9587          return ret;
9588  #endif
9589  #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9590      case TARGET_NR_move_mount:
9591          {
9592              void *p2, *p4;
9593  
9594              if (!arg2 || !arg4) {
9595                  return -TARGET_EFAULT;
9596              }
9597  
9598              p2 = lock_user_string(arg2);
9599              if (!p2) {
9600                  return -TARGET_EFAULT;
9601              }
9602  
9603              p4 = lock_user_string(arg4);
9604              if (!p4) {
9605                  unlock_user(p2, arg2, 0);
9606                  return -TARGET_EFAULT;
9607              }
9608              ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9609  
9610              unlock_user(p2, arg2, 0);
9611              unlock_user(p4, arg4, 0);
9612  
9613              return ret;
9614          }
9615  #endif
9616  #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9617      case TARGET_NR_open_tree:
9618          {
9619              void *p2;
9620              int host_flags;
9621  
9622              if (!arg2) {
9623                  return -TARGET_EFAULT;
9624              }
9625  
9626              p2 = lock_user_string(arg2);
9627              if (!p2) {
9628                  return -TARGET_EFAULT;
9629              }
9630  
9631              host_flags = arg3 & ~TARGET_O_CLOEXEC;
9632              if (arg3 & TARGET_O_CLOEXEC) {
9633                  host_flags |= O_CLOEXEC;
9634              }
9635  
9636              ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9637  
9638              unlock_user(p2, arg2, 0);
9639  
9640              return ret;
9641          }
9642  #endif
9643  #ifdef TARGET_NR_stime /* not on alpha */
9644      case TARGET_NR_stime:
9645          {
9646              struct timespec ts;
9647              ts.tv_nsec = 0;
9648              if (get_user_sal(ts.tv_sec, arg1)) {
9649                  return -TARGET_EFAULT;
9650              }
9651              return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9652          }
9653  #endif
9654  #ifdef TARGET_NR_alarm /* not on alpha */
9655      case TARGET_NR_alarm:
9656          return alarm(arg1);
9657  #endif
9658  #ifdef TARGET_NR_pause /* not on alpha */
9659      case TARGET_NR_pause:
9660          if (!block_signals()) {
9661              sigsuspend(&get_task_state(cpu)->signal_mask);
9662          }
9663          return -TARGET_EINTR;
9664  #endif
9665  #ifdef TARGET_NR_utime
9666      case TARGET_NR_utime:
9667          {
9668              struct utimbuf tbuf, *host_tbuf;
9669              struct target_utimbuf *target_tbuf;
9670              if (arg2) {
9671                  if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9672                      return -TARGET_EFAULT;
9673                  tbuf.actime = tswapal(target_tbuf->actime);
9674                  tbuf.modtime = tswapal(target_tbuf->modtime);
9675                  unlock_user_struct(target_tbuf, arg2, 0);
9676                  host_tbuf = &tbuf;
9677              } else {
9678                  host_tbuf = NULL;
9679              }
9680              if (!(p = lock_user_string(arg1)))
9681                  return -TARGET_EFAULT;
9682              ret = get_errno(utime(p, host_tbuf));
9683              unlock_user(p, arg1, 0);
9684          }
9685          return ret;
9686  #endif
9687  #ifdef TARGET_NR_utimes
9688      case TARGET_NR_utimes:
9689          {
9690              struct timeval *tvp, tv[2];
9691              if (arg2) {
9692                  if (copy_from_user_timeval(&tv[0], arg2)
9693                      || copy_from_user_timeval(&tv[1],
9694                                                arg2 + sizeof(struct target_timeval)))
9695                      return -TARGET_EFAULT;
9696                  tvp = tv;
9697              } else {
9698                  tvp = NULL;
9699              }
9700              if (!(p = lock_user_string(arg1)))
9701                  return -TARGET_EFAULT;
9702              ret = get_errno(utimes(p, tvp));
9703              unlock_user(p, arg1, 0);
9704          }
9705          return ret;
9706  #endif
9707  #if defined(TARGET_NR_futimesat)
9708      case TARGET_NR_futimesat:
9709          {
9710              struct timeval *tvp, tv[2];
9711              if (arg3) {
9712                  if (copy_from_user_timeval(&tv[0], arg3)
9713                      || copy_from_user_timeval(&tv[1],
9714                                                arg3 + sizeof(struct target_timeval)))
9715                      return -TARGET_EFAULT;
9716                  tvp = tv;
9717              } else {
9718                  tvp = NULL;
9719              }
9720              if (!(p = lock_user_string(arg2))) {
9721                  return -TARGET_EFAULT;
9722              }
9723              ret = get_errno(futimesat(arg1, path(p), tvp));
9724              unlock_user(p, arg2, 0);
9725          }
9726          return ret;
9727  #endif
9728  #ifdef TARGET_NR_access
9729      case TARGET_NR_access:
9730          if (!(p = lock_user_string(arg1))) {
9731              return -TARGET_EFAULT;
9732          }
9733          ret = get_errno(access(path(p), arg2));
9734          unlock_user(p, arg1, 0);
9735          return ret;
9736  #endif
9737  #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9738      case TARGET_NR_faccessat:
9739          if (!(p = lock_user_string(arg2))) {
9740              return -TARGET_EFAULT;
9741          }
9742          ret = get_errno(faccessat(arg1, p, arg3, 0));
9743          unlock_user(p, arg2, 0);
9744          return ret;
9745  #endif
9746  #if defined(TARGET_NR_faccessat2)
9747      case TARGET_NR_faccessat2:
9748          if (!(p = lock_user_string(arg2))) {
9749              return -TARGET_EFAULT;
9750          }
9751          ret = get_errno(faccessat(arg1, p, arg3, arg4));
9752          unlock_user(p, arg2, 0);
9753          return ret;
9754  #endif
9755  #ifdef TARGET_NR_nice /* not on alpha */
9756      case TARGET_NR_nice:
9757          return get_errno(nice(arg1));
9758  #endif
9759      case TARGET_NR_sync:
9760          sync();
9761          return 0;
9762  #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9763      case TARGET_NR_syncfs:
9764          return get_errno(syncfs(arg1));
9765  #endif
9766      case TARGET_NR_kill:
9767          return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9768  #ifdef TARGET_NR_rename
9769      case TARGET_NR_rename:
9770          {
9771              void *p2;
9772              p = lock_user_string(arg1);
9773              p2 = lock_user_string(arg2);
9774              if (!p || !p2)
9775                  ret = -TARGET_EFAULT;
9776              else
9777                  ret = get_errno(rename(p, p2));
9778              unlock_user(p2, arg2, 0);
9779              unlock_user(p, arg1, 0);
9780          }
9781          return ret;
9782  #endif
9783  #if defined(TARGET_NR_renameat)
9784      case TARGET_NR_renameat:
9785          {
9786              void *p2;
9787              p  = lock_user_string(arg2);
9788              p2 = lock_user_string(arg4);
9789              if (!p || !p2)
9790                  ret = -TARGET_EFAULT;
9791              else
9792                  ret = get_errno(renameat(arg1, p, arg3, p2));
9793              unlock_user(p2, arg4, 0);
9794              unlock_user(p, arg2, 0);
9795          }
9796          return ret;
9797  #endif
9798  #if defined(TARGET_NR_renameat2)
9799      case TARGET_NR_renameat2:
9800          {
9801              void *p2;
9802              p  = lock_user_string(arg2);
9803              p2 = lock_user_string(arg4);
9804              if (!p || !p2) {
9805                  ret = -TARGET_EFAULT;
9806              } else {
9807                  ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9808              }
9809              unlock_user(p2, arg4, 0);
9810              unlock_user(p, arg2, 0);
9811          }
9812          return ret;
9813  #endif
9814  #ifdef TARGET_NR_mkdir
9815      case TARGET_NR_mkdir:
9816          if (!(p = lock_user_string(arg1)))
9817              return -TARGET_EFAULT;
9818          ret = get_errno(mkdir(p, arg2));
9819          unlock_user(p, arg1, 0);
9820          return ret;
9821  #endif
9822  #if defined(TARGET_NR_mkdirat)
9823      case TARGET_NR_mkdirat:
9824          if (!(p = lock_user_string(arg2)))
9825              return -TARGET_EFAULT;
9826          ret = get_errno(mkdirat(arg1, p, arg3));
9827          unlock_user(p, arg2, 0);
9828          return ret;
9829  #endif
9830  #ifdef TARGET_NR_rmdir
9831      case TARGET_NR_rmdir:
9832          if (!(p = lock_user_string(arg1)))
9833              return -TARGET_EFAULT;
9834          ret = get_errno(rmdir(p));
9835          unlock_user(p, arg1, 0);
9836          return ret;
9837  #endif
9838      case TARGET_NR_dup:
9839          ret = get_errno(dup(arg1));
9840          if (ret >= 0) {
9841              fd_trans_dup(arg1, ret);
9842          }
9843          return ret;
9844  #ifdef TARGET_NR_pipe
9845      case TARGET_NR_pipe:
9846          return do_pipe(cpu_env, arg1, 0, 0);
9847  #endif
9848  #ifdef TARGET_NR_pipe2
9849      case TARGET_NR_pipe2:
9850          return do_pipe(cpu_env, arg1,
9851                         target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9852  #endif
9853      case TARGET_NR_times:
9854          {
9855              struct target_tms *tmsp;
9856              struct tms tms;
9857              ret = get_errno(times(&tms));
9858              if (arg1) {
9859                  tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9860                  if (!tmsp)
9861                      return -TARGET_EFAULT;
9862                  tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9863                  tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9864                  tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9865                  tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9866              }
9867              if (!is_error(ret))
9868                  ret = host_to_target_clock_t(ret);
9869          }
9870          return ret;
9871      case TARGET_NR_acct:
9872          if (arg1 == 0) {
9873              ret = get_errno(acct(NULL));
9874          } else {
9875              if (!(p = lock_user_string(arg1))) {
9876                  return -TARGET_EFAULT;
9877              }
9878              ret = get_errno(acct(path(p)));
9879              unlock_user(p, arg1, 0);
9880          }
9881          return ret;
9882  #ifdef TARGET_NR_umount2
9883      case TARGET_NR_umount2:
9884          if (!(p = lock_user_string(arg1)))
9885              return -TARGET_EFAULT;
9886          ret = get_errno(umount2(p, arg2));
9887          unlock_user(p, arg1, 0);
9888          return ret;
9889  #endif
9890      case TARGET_NR_ioctl:
9891          return do_ioctl(arg1, arg2, arg3);
9892  #ifdef TARGET_NR_fcntl
9893      case TARGET_NR_fcntl:
9894          return do_fcntl(arg1, arg2, arg3);
9895  #endif
9896      case TARGET_NR_setpgid:
9897          return get_errno(setpgid(arg1, arg2));
9898      case TARGET_NR_umask:
9899          return get_errno(umask(arg1));
9900      case TARGET_NR_chroot:
9901          if (!(p = lock_user_string(arg1)))
9902              return -TARGET_EFAULT;
9903          ret = get_errno(chroot(p));
9904          unlock_user(p, arg1, 0);
9905          return ret;
9906  #ifdef TARGET_NR_dup2
9907      case TARGET_NR_dup2:
9908          ret = get_errno(dup2(arg1, arg2));
9909          if (ret >= 0) {
9910              fd_trans_dup(arg1, arg2);
9911          }
9912          return ret;
9913  #endif
9914  #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9915      case TARGET_NR_dup3:
9916      {
9917          int host_flags;
9918  
9919          if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9920              return -EINVAL;
9921          }
9922          host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9923          ret = get_errno(dup3(arg1, arg2, host_flags));
9924          if (ret >= 0) {
9925              fd_trans_dup(arg1, arg2);
9926          }
9927          return ret;
9928      }
9929  #endif
9930  #ifdef TARGET_NR_getppid /* not on alpha */
9931      case TARGET_NR_getppid:
9932          return get_errno(getppid());
9933  #endif
9934  #ifdef TARGET_NR_getpgrp
9935      case TARGET_NR_getpgrp:
9936          return get_errno(getpgrp());
9937  #endif
9938      case TARGET_NR_setsid:
9939          return get_errno(setsid());
9940  #ifdef TARGET_NR_sigaction
9941      case TARGET_NR_sigaction:
9942          {
9943  #if defined(TARGET_MIPS)
9944              struct target_sigaction act, oact, *pact, *old_act;
9945  
9946              if (arg2) {
9947                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9948                      return -TARGET_EFAULT;
9949                  act._sa_handler = old_act->_sa_handler;
9950                  target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9951                  act.sa_flags = old_act->sa_flags;
9952                  unlock_user_struct(old_act, arg2, 0);
9953                  pact = &act;
9954              } else {
9955                  pact = NULL;
9956              }
9957  
9958              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9959  
9960              if (!is_error(ret) && arg3) {
9961                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9962                      return -TARGET_EFAULT;
9963                  old_act->_sa_handler = oact._sa_handler;
9964                  old_act->sa_flags = oact.sa_flags;
9965                  old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9966                  old_act->sa_mask.sig[1] = 0;
9967                  old_act->sa_mask.sig[2] = 0;
9968                  old_act->sa_mask.sig[3] = 0;
9969                  unlock_user_struct(old_act, arg3, 1);
9970              }
9971  #else
9972              struct target_old_sigaction *old_act;
9973              struct target_sigaction act, oact, *pact;
9974              if (arg2) {
9975                  if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9976                      return -TARGET_EFAULT;
9977                  act._sa_handler = old_act->_sa_handler;
9978                  target_siginitset(&act.sa_mask, old_act->sa_mask);
9979                  act.sa_flags = old_act->sa_flags;
9980  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9981                  act.sa_restorer = old_act->sa_restorer;
9982  #endif
9983                  unlock_user_struct(old_act, arg2, 0);
9984                  pact = &act;
9985              } else {
9986                  pact = NULL;
9987              }
9988              ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9989              if (!is_error(ret) && arg3) {
9990                  if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9991                      return -TARGET_EFAULT;
9992                  old_act->_sa_handler = oact._sa_handler;
9993                  old_act->sa_mask = oact.sa_mask.sig[0];
9994                  old_act->sa_flags = oact.sa_flags;
9995  #ifdef TARGET_ARCH_HAS_SA_RESTORER
9996                  old_act->sa_restorer = oact.sa_restorer;
9997  #endif
9998                  unlock_user_struct(old_act, arg3, 1);
9999              }
10000  #endif
10001          }
10002          return ret;
10003  #endif
10004      case TARGET_NR_rt_sigaction:
10005          {
10006              /*
10007               * For Alpha and SPARC this is a 5 argument syscall, with
10008               * a 'restorer' parameter which must be copied into the
10009               * sa_restorer field of the sigaction struct.
10010               * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10011               * and arg5 is the sigsetsize.
10012               */
10013  #if defined(TARGET_ALPHA)
10014              target_ulong sigsetsize = arg4;
10015              target_ulong restorer = arg5;
10016  #elif defined(TARGET_SPARC)
10017              target_ulong restorer = arg4;
10018              target_ulong sigsetsize = arg5;
10019  #else
10020              target_ulong sigsetsize = arg4;
10021              target_ulong restorer = 0;
10022  #endif
10023              struct target_sigaction *act = NULL;
10024              struct target_sigaction *oact = NULL;
10025  
10026              if (sigsetsize != sizeof(target_sigset_t)) {
10027                  return -TARGET_EINVAL;
10028              }
10029              if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10030                  return -TARGET_EFAULT;
10031              }
10032              if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10033                  ret = -TARGET_EFAULT;
10034              } else {
10035                  ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10036                  if (oact) {
10037                      unlock_user_struct(oact, arg3, 1);
10038                  }
10039              }
10040              if (act) {
10041                  unlock_user_struct(act, arg2, 0);
10042              }
10043          }
10044          return ret;
10045  #ifdef TARGET_NR_sgetmask /* not on alpha */
10046      case TARGET_NR_sgetmask:
10047          {
10048              sigset_t cur_set;
10049              abi_ulong target_set;
10050              ret = do_sigprocmask(0, NULL, &cur_set);
10051              if (!ret) {
10052                  host_to_target_old_sigset(&target_set, &cur_set);
10053                  ret = target_set;
10054              }
10055          }
10056          return ret;
10057  #endif
10058  #ifdef TARGET_NR_ssetmask /* not on alpha */
10059      case TARGET_NR_ssetmask:
10060          {
10061              sigset_t set, oset;
10062              abi_ulong target_set = arg1;
10063              target_to_host_old_sigset(&set, &target_set);
10064              ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10065              if (!ret) {
10066                  host_to_target_old_sigset(&target_set, &oset);
10067                  ret = target_set;
10068              }
10069          }
10070          return ret;
10071  #endif
10072  #ifdef TARGET_NR_sigprocmask
10073      case TARGET_NR_sigprocmask:
10074          {
10075  #if defined(TARGET_ALPHA)
10076              sigset_t set, oldset;
10077              abi_ulong mask;
10078              int how;
10079  
10080              switch (arg1) {
10081              case TARGET_SIG_BLOCK:
10082                  how = SIG_BLOCK;
10083                  break;
10084              case TARGET_SIG_UNBLOCK:
10085                  how = SIG_UNBLOCK;
10086                  break;
10087              case TARGET_SIG_SETMASK:
10088                  how = SIG_SETMASK;
10089                  break;
10090              default:
10091                  return -TARGET_EINVAL;
10092              }
10093              mask = arg2;
10094              target_to_host_old_sigset(&set, &mask);
10095  
10096              ret = do_sigprocmask(how, &set, &oldset);
10097              if (!is_error(ret)) {
10098                  host_to_target_old_sigset(&mask, &oldset);
10099                  ret = mask;
10100                  cpu_env->ir[IR_V0] = 0; /* force no error */
10101              }
10102  #else
10103              sigset_t set, oldset, *set_ptr;
10104              int how;
10105  
10106              if (arg2) {
10107                  p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10108                  if (!p) {
10109                      return -TARGET_EFAULT;
10110                  }
10111                  target_to_host_old_sigset(&set, p);
10112                  unlock_user(p, arg2, 0);
10113                  set_ptr = &set;
10114                  switch (arg1) {
10115                  case TARGET_SIG_BLOCK:
10116                      how = SIG_BLOCK;
10117                      break;
10118                  case TARGET_SIG_UNBLOCK:
10119                      how = SIG_UNBLOCK;
10120                      break;
10121                  case TARGET_SIG_SETMASK:
10122                      how = SIG_SETMASK;
10123                      break;
10124                  default:
10125                      return -TARGET_EINVAL;
10126                  }
10127              } else {
10128                  how = 0;
10129                  set_ptr = NULL;
10130              }
10131              ret = do_sigprocmask(how, set_ptr, &oldset);
10132              if (!is_error(ret) && arg3) {
10133                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10134                      return -TARGET_EFAULT;
10135                  host_to_target_old_sigset(p, &oldset);
10136                  unlock_user(p, arg3, sizeof(target_sigset_t));
10137              }
10138  #endif
10139          }
10140          return ret;
10141  #endif
10142      case TARGET_NR_rt_sigprocmask:
10143          {
10144              int how = arg1;
10145              sigset_t set, oldset, *set_ptr;
10146  
10147              if (arg4 != sizeof(target_sigset_t)) {
10148                  return -TARGET_EINVAL;
10149              }
10150  
10151              if (arg2) {
10152                  p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10153                  if (!p) {
10154                      return -TARGET_EFAULT;
10155                  }
10156                  target_to_host_sigset(&set, p);
10157                  unlock_user(p, arg2, 0);
10158                  set_ptr = &set;
10159                  switch(how) {
10160                  case TARGET_SIG_BLOCK:
10161                      how = SIG_BLOCK;
10162                      break;
10163                  case TARGET_SIG_UNBLOCK:
10164                      how = SIG_UNBLOCK;
10165                      break;
10166                  case TARGET_SIG_SETMASK:
10167                      how = SIG_SETMASK;
10168                      break;
10169                  default:
10170                      return -TARGET_EINVAL;
10171                  }
10172              } else {
10173                  how = 0;
10174                  set_ptr = NULL;
10175              }
10176              ret = do_sigprocmask(how, set_ptr, &oldset);
10177              if (!is_error(ret) && arg3) {
10178                  if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10179                      return -TARGET_EFAULT;
10180                  host_to_target_sigset(p, &oldset);
10181                  unlock_user(p, arg3, sizeof(target_sigset_t));
10182              }
10183          }
10184          return ret;
10185  #ifdef TARGET_NR_sigpending
10186      case TARGET_NR_sigpending:
10187          {
10188              sigset_t set;
10189              ret = get_errno(sigpending(&set));
10190              if (!is_error(ret)) {
10191                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10192                      return -TARGET_EFAULT;
10193                  host_to_target_old_sigset(p, &set);
10194                  unlock_user(p, arg1, sizeof(target_sigset_t));
10195              }
10196          }
10197          return ret;
10198  #endif
10199      case TARGET_NR_rt_sigpending:
10200          {
10201              sigset_t set;
10202  
10203              /* Yes, this check is >, not != like most. We follow the kernel's
10204               * logic and it does it like this because it implements
10205               * NR_sigpending through the same code path, and in that case
10206               * the old_sigset_t is smaller in size.
10207               */
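            /*
             * In practice this means a guest passing the smaller old_sigset_t
             * size is still accepted here, while anything larger than
             * target_sigset_t fails with EINVAL.
             */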
10208              if (arg2 > sizeof(target_sigset_t)) {
10209                  return -TARGET_EINVAL;
10210              }
10211  
10212              ret = get_errno(sigpending(&set));
10213              if (!is_error(ret)) {
10214                  if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10215                      return -TARGET_EFAULT;
10216                  host_to_target_sigset(p, &set);
10217                  unlock_user(p, arg1, sizeof(target_sigset_t));
10218              }
10219          }
10220          return ret;
10221  #ifdef TARGET_NR_sigsuspend
10222      case TARGET_NR_sigsuspend:
10223          {
10224              sigset_t *set;
10225  
10226  #if defined(TARGET_ALPHA)
10227              TaskState *ts = get_task_state(cpu);
10228              /* target_to_host_old_sigset will bswap back */
10229              abi_ulong mask = tswapal(arg1);
10230              set = &ts->sigsuspend_mask;
10231              target_to_host_old_sigset(set, &mask);
10232  #else
10233              ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10234              if (ret != 0) {
10235                  return ret;
10236              }
10237  #endif
10238              ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10239              finish_sigsuspend_mask(ret);
10240          }
10241          return ret;
10242  #endif
10243      case TARGET_NR_rt_sigsuspend:
10244          {
10245              sigset_t *set;
10246  
10247              ret = process_sigsuspend_mask(&set, arg1, arg2);
10248              if (ret != 0) {
10249                  return ret;
10250              }
10251              ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10252              finish_sigsuspend_mask(ret);
10253          }
10254          return ret;
10255  #ifdef TARGET_NR_rt_sigtimedwait
10256      case TARGET_NR_rt_sigtimedwait:
10257          {
10258              sigset_t set;
10259              struct timespec uts, *puts;
10260              siginfo_t uinfo;
10261  
10262              if (arg4 != sizeof(target_sigset_t)) {
10263                  return -TARGET_EINVAL;
10264              }
10265  
10266              if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10267                  return -TARGET_EFAULT;
10268              target_to_host_sigset(&set, p);
10269              unlock_user(p, arg1, 0);
10270              if (arg3) {
10271                  puts = &uts;
10272                  if (target_to_host_timespec(puts, arg3)) {
10273                      return -TARGET_EFAULT;
10274                  }
10275              } else {
10276                  puts = NULL;
10277              }
10278              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10279                                                   SIGSET_T_SIZE));
10280              if (!is_error(ret)) {
10281                  if (arg2) {
10282                      p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10283                                    0);
10284                      if (!p) {
10285                          return -TARGET_EFAULT;
10286                      }
10287                      host_to_target_siginfo(p, &uinfo);
10288                      unlock_user(p, arg2, sizeof(target_siginfo_t));
10289                  }
10290                  ret = host_to_target_signal(ret);
10291              }
10292          }
10293          return ret;
10294  #endif
10295  #ifdef TARGET_NR_rt_sigtimedwait_time64
10296      case TARGET_NR_rt_sigtimedwait_time64:
10297          {
10298              sigset_t set;
10299              struct timespec uts, *puts;
10300              siginfo_t uinfo;
10301  
10302              if (arg4 != sizeof(target_sigset_t)) {
10303                  return -TARGET_EINVAL;
10304              }
10305  
10306              p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10307              if (!p) {
10308                  return -TARGET_EFAULT;
10309              }
10310              target_to_host_sigset(&set, p);
10311              unlock_user(p, arg1, 0);
10312              if (arg3) {
10313                  puts = &uts;
10314                  if (target_to_host_timespec64(puts, arg3)) {
10315                      return -TARGET_EFAULT;
10316                  }
10317              } else {
10318                  puts = NULL;
10319              }
10320              ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10321                                                   SIGSET_T_SIZE));
10322              if (!is_error(ret)) {
10323                  if (arg2) {
10324                      p = lock_user(VERIFY_WRITE, arg2,
10325                                    sizeof(target_siginfo_t), 0);
10326                      if (!p) {
10327                          return -TARGET_EFAULT;
10328                      }
10329                      host_to_target_siginfo(p, &uinfo);
10330                      unlock_user(p, arg2, sizeof(target_siginfo_t));
10331                  }
10332                  ret = host_to_target_signal(ret);
10333              }
10334          }
10335          return ret;
10336  #endif
10337      case TARGET_NR_rt_sigqueueinfo:
10338          {
10339              siginfo_t uinfo;
10340  
10341              p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10342              if (!p) {
10343                  return -TARGET_EFAULT;
10344              }
10345              target_to_host_siginfo(&uinfo, p);
10346              unlock_user(p, arg3, 0);
10347              ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10348          }
10349          return ret;
10350      case TARGET_NR_rt_tgsigqueueinfo:
10351          {
10352              siginfo_t uinfo;
10353  
10354              p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10355              if (!p) {
10356                  return -TARGET_EFAULT;
10357              }
10358              target_to_host_siginfo(&uinfo, p);
10359              unlock_user(p, arg4, 0);
10360              ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10361          }
10362          return ret;
10363  #ifdef TARGET_NR_sigreturn
10364      case TARGET_NR_sigreturn:
10365          if (block_signals()) {
10366              return -QEMU_ERESTARTSYS;
10367          }
10368          return do_sigreturn(cpu_env);
10369  #endif
10370      case TARGET_NR_rt_sigreturn:
10371          if (block_signals()) {
10372              return -QEMU_ERESTARTSYS;
10373          }
10374          return do_rt_sigreturn(cpu_env);
10375      case TARGET_NR_sethostname:
10376          if (!(p = lock_user_string(arg1)))
10377              return -TARGET_EFAULT;
10378          ret = get_errno(sethostname(p, arg2));
10379          unlock_user(p, arg1, 0);
10380          return ret;
10381  #ifdef TARGET_NR_setrlimit
10382      case TARGET_NR_setrlimit:
10383          {
10384              int resource = target_to_host_resource(arg1);
10385              struct target_rlimit *target_rlim;
10386              struct rlimit rlim;
10387              if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10388                  return -TARGET_EFAULT;
10389              rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10390              rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10391              unlock_user_struct(target_rlim, arg2, 0);
10392              /*
10393               * If we just passed through resource limit settings for memory then
10394               * they would also apply to QEMU's own allocations, and QEMU will
10395               * crash or hang or die if its allocations fail. Ideally we would
10396               * track the guest allocations in QEMU and apply the limits ourselves.
10397               * For now, just tell the guest the call succeeded but don't actually
10398               * limit anything.
10399               */
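            /*
             * For example, a guest "ulimit -v" (RLIMIT_AS) is reported as
             * successful below but changes nothing for either the guest or
             * QEMU itself.
             */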
10400              if (resource != RLIMIT_AS &&
10401                  resource != RLIMIT_DATA &&
10402                  resource != RLIMIT_STACK) {
10403                  return get_errno(setrlimit(resource, &rlim));
10404              } else {
10405                  return 0;
10406              }
10407          }
10408  #endif
10409  #ifdef TARGET_NR_getrlimit
10410      case TARGET_NR_getrlimit:
10411          {
10412              int resource = target_to_host_resource(arg1);
10413              struct target_rlimit *target_rlim;
10414              struct rlimit rlim;
10415  
10416              ret = get_errno(getrlimit(resource, &rlim));
10417              if (!is_error(ret)) {
10418                  if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10419                      return -TARGET_EFAULT;
10420                  target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10421                  target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10422                  unlock_user_struct(target_rlim, arg2, 1);
10423              }
10424          }
10425          return ret;
10426  #endif
10427      case TARGET_NR_getrusage:
10428          {
10429              struct rusage rusage;
10430              ret = get_errno(getrusage(arg1, &rusage));
10431              if (!is_error(ret)) {
10432                  ret = host_to_target_rusage(arg2, &rusage);
10433              }
10434          }
10435          return ret;
10436  #if defined(TARGET_NR_gettimeofday)
10437      case TARGET_NR_gettimeofday:
10438          {
10439              struct timeval tv;
10440              struct timezone tz;
10441  
10442              ret = get_errno(gettimeofday(&tv, &tz));
10443              if (!is_error(ret)) {
10444                  if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10445                      return -TARGET_EFAULT;
10446                  }
10447                  if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10448                      return -TARGET_EFAULT;
10449                  }
10450              }
10451          }
10452          return ret;
10453  #endif
10454  #if defined(TARGET_NR_settimeofday)
10455      case TARGET_NR_settimeofday:
10456          {
10457              struct timeval tv, *ptv = NULL;
10458              struct timezone tz, *ptz = NULL;
10459  
10460              if (arg1) {
10461                  if (copy_from_user_timeval(&tv, arg1)) {
10462                      return -TARGET_EFAULT;
10463                  }
10464                  ptv = &tv;
10465              }
10466  
10467              if (arg2) {
10468                  if (copy_from_user_timezone(&tz, arg2)) {
10469                      return -TARGET_EFAULT;
10470                  }
10471                  ptz = &tz;
10472              }
10473  
10474              return get_errno(settimeofday(ptv, ptz));
10475          }
10476  #endif
10477  #if defined(TARGET_NR_select)
10478      case TARGET_NR_select:
10479  #if defined(TARGET_WANT_NI_OLD_SELECT)
10480          /* some architectures used to have old_select here,
10481           * but they now return ENOSYS for it.
10482           */
10483          ret = -TARGET_ENOSYS;
10484  #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10485          ret = do_old_select(arg1);
10486  #else
10487          ret = do_select(arg1, arg2, arg3, arg4, arg5);
10488  #endif
10489          return ret;
10490  #endif
10491  #ifdef TARGET_NR_pselect6
10492      case TARGET_NR_pselect6:
10493          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10494  #endif
10495  #ifdef TARGET_NR_pselect6_time64
10496      case TARGET_NR_pselect6_time64:
10497          return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10498  #endif
10499  #ifdef TARGET_NR_symlink
10500      case TARGET_NR_symlink:
10501          {
10502              void *p2;
10503              p = lock_user_string(arg1);
10504              p2 = lock_user_string(arg2);
10505              if (!p || !p2)
10506                  ret = -TARGET_EFAULT;
10507              else
10508                  ret = get_errno(symlink(p, p2));
10509              unlock_user(p2, arg2, 0);
10510              unlock_user(p, arg1, 0);
10511          }
10512          return ret;
10513  #endif
10514  #if defined(TARGET_NR_symlinkat)
10515      case TARGET_NR_symlinkat:
10516          {
10517              void *p2;
10518              p  = lock_user_string(arg1);
10519              p2 = lock_user_string(arg3);
10520              if (!p || !p2)
10521                  ret = -TARGET_EFAULT;
10522              else
10523                  ret = get_errno(symlinkat(p, arg2, p2));
10524              unlock_user(p2, arg3, 0);
10525              unlock_user(p, arg1, 0);
10526          }
10527          return ret;
10528  #endif
10529  #ifdef TARGET_NR_readlink
10530      case TARGET_NR_readlink:
10531          {
10532              void *p2;
10533              p = lock_user_string(arg1);
10534              p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10535              ret = get_errno(do_guest_readlink(p, p2, arg3));
10536              unlock_user(p2, arg2, ret);
10537              unlock_user(p, arg1, 0);
10538          }
10539          return ret;
10540  #endif
10541  #if defined(TARGET_NR_readlinkat)
10542      case TARGET_NR_readlinkat:
10543          {
10544              void *p2;
10545              p  = lock_user_string(arg2);
10546              p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10547              if (!p || !p2) {
10548                  ret = -TARGET_EFAULT;
10549              } else if (!arg4) {
10550                  /* Short circuit this for the magic exe check. */
10551                  ret = -TARGET_EINVAL;
10552              } else if (is_proc_myself((const char *)p, "exe")) {
10553                  /*
10554                   * Don't worry about sign mismatch as earlier mapping
10555                   * logic would have thrown a bad address error.
10556                   */
10557                  ret = MIN(strlen(exec_path), arg4);
10558                  /* We cannot NUL terminate the string. */
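                /*
                 * That matches readlink(2), which likewise returns the number
                 * of bytes placed in the buffer without appending a NUL byte.
                 */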
10559                  memcpy(p2, exec_path, ret);
10560              } else {
10561                  ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10562              }
10563              unlock_user(p2, arg3, ret);
10564              unlock_user(p, arg2, 0);
10565          }
10566          return ret;
10567  #endif
10568  #ifdef TARGET_NR_swapon
10569      case TARGET_NR_swapon:
10570          if (!(p = lock_user_string(arg1)))
10571              return -TARGET_EFAULT;
10572          ret = get_errno(swapon(p, arg2));
10573          unlock_user(p, arg1, 0);
10574          return ret;
10575  #endif
10576      case TARGET_NR_reboot:
10577          if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10578             /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2; it must be ignored otherwise */
10579             p = lock_user_string(arg4);
10580             if (!p) {
10581                 return -TARGET_EFAULT;
10582             }
10583             ret = get_errno(reboot(arg1, arg2, arg3, p));
10584             unlock_user(p, arg4, 0);
10585          } else {
10586             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10587          }
10588          return ret;
10589  #ifdef TARGET_NR_mmap
10590      case TARGET_NR_mmap:
10591  #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10592          {
10593              abi_ulong *v;
10594              abi_ulong v1, v2, v3, v4, v5, v6;
10595              if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10596                  return -TARGET_EFAULT;
10597              v1 = tswapal(v[0]);
10598              v2 = tswapal(v[1]);
10599              v3 = tswapal(v[2]);
10600              v4 = tswapal(v[3]);
10601              v5 = tswapal(v[4]);
10602              v6 = tswapal(v[5]);
10603              unlock_user(v, arg1, 0);
10604              return do_mmap(v1, v2, v3, v4, v5, v6);
10605          }
10606  #else
10607          /* mmap pointers are always untagged */
10608          return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10609  #endif
10610  #endif
10611  #ifdef TARGET_NR_mmap2
10612      case TARGET_NR_mmap2:
10613  #ifndef MMAP_SHIFT
10614  #define MMAP_SHIFT 12
10615  #endif
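        /*
         * mmap2 passes the file offset in units of pages (4096 bytes unless
         * the architecture overrides MMAP_SHIFT), so convert it to a byte
         * offset here; the abi_ulong cast keeps the page number
         * zero-extended before the shift.
         */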
10616          return do_mmap(arg1, arg2, arg3, arg4, arg5,
10617                         (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10618  #endif
10619      case TARGET_NR_munmap:
10620          arg1 = cpu_untagged_addr(cpu, arg1);
10621          return get_errno(target_munmap(arg1, arg2));
10622      case TARGET_NR_mprotect:
10623          arg1 = cpu_untagged_addr(cpu, arg1);
10624          {
10625              TaskState *ts = get_task_state(cpu);
10626              /* Special hack to detect libc making the stack executable.  */
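            /*
             * Typically this is glibc calling mprotect() with PROT_GROWSDOWN
             * on an address inside the stack (e.g. when a loaded object
             * requires an executable stack); extend the range down to the
             * stack limit so the whole guest stack gets the new protection,
             * much as the kernel does for a real grows-down mapping.
             */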
10627              if ((arg3 & PROT_GROWSDOWN)
10628                  && arg1 >= ts->info->stack_limit
10629                  && arg1 <= ts->info->start_stack) {
10630                  arg3 &= ~PROT_GROWSDOWN;
10631                  arg2 = arg2 + arg1 - ts->info->stack_limit;
10632                  arg1 = ts->info->stack_limit;
10633              }
10634          }
10635          return get_errno(target_mprotect(arg1, arg2, arg3));
10636  #ifdef TARGET_NR_mremap
10637      case TARGET_NR_mremap:
10638          arg1 = cpu_untagged_addr(cpu, arg1);
10639          /* mremap new_addr (arg5) is always untagged */
10640          return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10641  #endif
10642          /* ??? msync/mlock/munlock are broken for softmmu.  */
10643  #ifdef TARGET_NR_msync
10644      case TARGET_NR_msync:
10645          return get_errno(msync(g2h(cpu, arg1), arg2,
10646                                 target_to_host_msync_arg(arg3)));
10647  #endif
10648  #ifdef TARGET_NR_mlock
10649      case TARGET_NR_mlock:
10650          return get_errno(mlock(g2h(cpu, arg1), arg2));
10651  #endif
10652  #ifdef TARGET_NR_munlock
10653      case TARGET_NR_munlock:
10654          return get_errno(munlock(g2h(cpu, arg1), arg2));
10655  #endif
10656  #ifdef TARGET_NR_mlockall
10657      case TARGET_NR_mlockall:
10658          return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10659  #endif
10660  #ifdef TARGET_NR_munlockall
10661      case TARGET_NR_munlockall:
10662          return get_errno(munlockall());
10663  #endif
10664  #ifdef TARGET_NR_truncate
10665      case TARGET_NR_truncate:
10666          if (!(p = lock_user_string(arg1)))
10667              return -TARGET_EFAULT;
10668          ret = get_errno(truncate(p, arg2));
10669          unlock_user(p, arg1, 0);
10670          return ret;
10671  #endif
10672  #ifdef TARGET_NR_ftruncate
10673      case TARGET_NR_ftruncate:
10674          return get_errno(ftruncate(arg1, arg2));
10675  #endif
10676      case TARGET_NR_fchmod:
10677          return get_errno(fchmod(arg1, arg2));
10678  #if defined(TARGET_NR_fchmodat)
10679      case TARGET_NR_fchmodat:
10680          if (!(p = lock_user_string(arg2)))
10681              return -TARGET_EFAULT;
10682          ret = get_errno(fchmodat(arg1, p, arg3, 0));
10683          unlock_user(p, arg2, 0);
10684          return ret;
10685  #endif
10686      case TARGET_NR_getpriority:
10687          /* Note that negative values are valid for getpriority, so we must
10688             differentiate based on errno settings.  */
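        /*
         * glibc's getpriority() returns the real nice value (-20..19), so -1
         * is a perfectly legal result; only a non-zero errno indicates
         * failure here.
         */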
10689          errno = 0;
10690          ret = getpriority(arg1, arg2);
10691          if (ret == -1 && errno != 0) {
10692              return -host_to_target_errno(errno);
10693          }
10694  #ifdef TARGET_ALPHA
10695          /* Return value is the unbiased priority.  Signal no error.  */
10696          cpu_env->ir[IR_V0] = 0;
10697  #else
10698          /* Return value is a biased priority to avoid negative numbers.  */
10699          ret = 20 - ret;
10700  #endif
10701          return ret;
10702      case TARGET_NR_setpriority:
10703          return get_errno(setpriority(arg1, arg2, arg3));
10704  #ifdef TARGET_NR_statfs
10705      case TARGET_NR_statfs:
10706          if (!(p = lock_user_string(arg1))) {
10707              return -TARGET_EFAULT;
10708          }
10709          ret = get_errno(statfs(path(p), &stfs));
10710          unlock_user(p, arg1, 0);
10711      convert_statfs:
10712          if (!is_error(ret)) {
10713              struct target_statfs *target_stfs;
10714  
10715              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10716                  return -TARGET_EFAULT;
10717              __put_user(stfs.f_type, &target_stfs->f_type);
10718              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10719              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10720              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10721              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10722              __put_user(stfs.f_files, &target_stfs->f_files);
10723              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10724              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10725              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10726              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10727              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10728  #ifdef _STATFS_F_FLAGS
10729              __put_user(stfs.f_flags, &target_stfs->f_flags);
10730  #else
10731              __put_user(0, &target_stfs->f_flags);
10732  #endif
10733              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10734              unlock_user_struct(target_stfs, arg2, 1);
10735          }
10736          return ret;
10737  #endif
10738  #ifdef TARGET_NR_fstatfs
10739      case TARGET_NR_fstatfs:
10740          ret = get_errno(fstatfs(arg1, &stfs));
10741          goto convert_statfs;
10742  #endif
10743  #ifdef TARGET_NR_statfs64
10744      case TARGET_NR_statfs64:
10745          if (!(p = lock_user_string(arg1))) {
10746              return -TARGET_EFAULT;
10747          }
10748          ret = get_errno(statfs(path(p), &stfs));
10749          unlock_user(p, arg1, 0);
10750      convert_statfs64:
10751          if (!is_error(ret)) {
10752              struct target_statfs64 *target_stfs;
10753  
10754              if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10755                  return -TARGET_EFAULT;
10756              __put_user(stfs.f_type, &target_stfs->f_type);
10757              __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10758              __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10759              __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10760              __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10761              __put_user(stfs.f_files, &target_stfs->f_files);
10762              __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10763              __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10764              __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10765              __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10766              __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10767  #ifdef _STATFS_F_FLAGS
10768              __put_user(stfs.f_flags, &target_stfs->f_flags);
10769  #else
10770              __put_user(0, &target_stfs->f_flags);
10771  #endif
10772              memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10773              unlock_user_struct(target_stfs, arg3, 1);
10774          }
10775          return ret;
10776      case TARGET_NR_fstatfs64:
10777          ret = get_errno(fstatfs(arg1, &stfs));
10778          goto convert_statfs64;
10779  #endif
10780  #ifdef TARGET_NR_socketcall
10781      case TARGET_NR_socketcall:
10782          return do_socketcall(arg1, arg2);
10783  #endif
10784  #ifdef TARGET_NR_accept
10785      case TARGET_NR_accept:
10786          return do_accept4(arg1, arg2, arg3, 0);
10787  #endif
10788  #ifdef TARGET_NR_accept4
10789      case TARGET_NR_accept4:
10790          return do_accept4(arg1, arg2, arg3, arg4);
10791  #endif
10792  #ifdef TARGET_NR_bind
10793      case TARGET_NR_bind:
10794          return do_bind(arg1, arg2, arg3);
10795  #endif
10796  #ifdef TARGET_NR_connect
10797      case TARGET_NR_connect:
10798          return do_connect(arg1, arg2, arg3);
10799  #endif
10800  #ifdef TARGET_NR_getpeername
10801      case TARGET_NR_getpeername:
10802          return do_getpeername(arg1, arg2, arg3);
10803  #endif
10804  #ifdef TARGET_NR_getsockname
10805      case TARGET_NR_getsockname:
10806          return do_getsockname(arg1, arg2, arg3);
10807  #endif
10808  #ifdef TARGET_NR_getsockopt
10809      case TARGET_NR_getsockopt:
10810          return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10811  #endif
10812  #ifdef TARGET_NR_listen
10813      case TARGET_NR_listen:
10814          return get_errno(listen(arg1, arg2));
10815  #endif
10816  #ifdef TARGET_NR_recv
10817      case TARGET_NR_recv:
10818          return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10819  #endif
10820  #ifdef TARGET_NR_recvfrom
10821      case TARGET_NR_recvfrom:
10822          return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10823  #endif
10824  #ifdef TARGET_NR_recvmsg
10825      case TARGET_NR_recvmsg:
10826          return do_sendrecvmsg(arg1, arg2, arg3, 0);
10827  #endif
10828  #ifdef TARGET_NR_send
10829      case TARGET_NR_send:
10830          return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10831  #endif
10832  #ifdef TARGET_NR_sendmsg
10833      case TARGET_NR_sendmsg:
10834          return do_sendrecvmsg(arg1, arg2, arg3, 1);
10835  #endif
10836  #ifdef TARGET_NR_sendmmsg
10837      case TARGET_NR_sendmmsg:
10838          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10839  #endif
10840  #ifdef TARGET_NR_recvmmsg
10841      case TARGET_NR_recvmmsg:
10842          return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10843  #endif
10844  #ifdef TARGET_NR_sendto
10845      case TARGET_NR_sendto:
10846          return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10847  #endif
10848  #ifdef TARGET_NR_shutdown
10849      case TARGET_NR_shutdown:
10850          return get_errno(shutdown(arg1, arg2));
10851  #endif
10852  #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10853      case TARGET_NR_getrandom:
10854          p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10855          if (!p) {
10856              return -TARGET_EFAULT;
10857          }
10858          ret = get_errno(getrandom(p, arg2, arg3));
10859          unlock_user(p, arg1, ret);
10860          return ret;
10861  #endif
10862  #ifdef TARGET_NR_socket
10863      case TARGET_NR_socket:
10864          return do_socket(arg1, arg2, arg3);
10865  #endif
10866  #ifdef TARGET_NR_socketpair
10867      case TARGET_NR_socketpair:
10868          return do_socketpair(arg1, arg2, arg3, arg4);
10869  #endif
10870  #ifdef TARGET_NR_setsockopt
10871      case TARGET_NR_setsockopt:
10872          return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10873  #endif
10874  #if defined(TARGET_NR_syslog)
10875      case TARGET_NR_syslog:
10876          {
10877              int len = arg2;
10878  
10879              switch (arg1) {
10880              case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10881              case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10882              case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10883              case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10884              case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10885              case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10886              case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10887              case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10888                  return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10889              case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10890              case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10891              case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10892                  {
10893                      if (len < 0) {
10894                          return -TARGET_EINVAL;
10895                      }
10896                      if (len == 0) {
10897                          return 0;
10898                      }
10899                      p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10900                      if (!p) {
10901                          return -TARGET_EFAULT;
10902                      }
10903                      ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10904                      unlock_user(p, arg2, arg3);
10905                  }
10906                  return ret;
10907              default:
10908                  return -TARGET_EINVAL;
10909              }
10910          }
10911          break;
10912  #endif
10913      case TARGET_NR_setitimer:
10914          {
10915              struct itimerval value, ovalue, *pvalue;
10916  
10917              if (arg2) {
10918                  pvalue = &value;
10919                  if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10920                      || copy_from_user_timeval(&pvalue->it_value,
10921                                                arg2 + sizeof(struct target_timeval)))
10922                      return -TARGET_EFAULT;
10923              } else {
10924                  pvalue = NULL;
10925              }
10926              ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10927              if (!is_error(ret) && arg3) {
10928                  if (copy_to_user_timeval(arg3,
10929                                           &ovalue.it_interval)
10930                      || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10931                                              &ovalue.it_value))
10932                      return -TARGET_EFAULT;
10933              }
10934          }
10935          return ret;
10936      case TARGET_NR_getitimer:
10937          {
10938              struct itimerval value;
10939  
10940              ret = get_errno(getitimer(arg1, &value));
10941              if (!is_error(ret) && arg2) {
10942                  if (copy_to_user_timeval(arg2,
10943                                           &value.it_interval)
10944                      || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10945                                              &value.it_value))
10946                      return -TARGET_EFAULT;
10947              }
10948          }
10949          return ret;
10950  #ifdef TARGET_NR_stat
10951      case TARGET_NR_stat:
10952          if (!(p = lock_user_string(arg1))) {
10953              return -TARGET_EFAULT;
10954          }
10955          ret = get_errno(stat(path(p), &st));
10956          unlock_user(p, arg1, 0);
10957          goto do_stat;
10958  #endif
10959  #ifdef TARGET_NR_lstat
10960      case TARGET_NR_lstat:
10961          if (!(p = lock_user_string(arg1))) {
10962              return -TARGET_EFAULT;
10963          }
10964          ret = get_errno(lstat(path(p), &st));
10965          unlock_user(p, arg1, 0);
10966          goto do_stat;
10967  #endif
10968  #ifdef TARGET_NR_fstat
10969      case TARGET_NR_fstat:
10970          {
10971              ret = get_errno(fstat(arg1, &st));
10972  #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10973          do_stat:
10974  #endif
10975              if (!is_error(ret)) {
10976                  struct target_stat *target_st;
10977  
10978                  if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10979                      return -TARGET_EFAULT;
10980                  memset(target_st, 0, sizeof(*target_st));
10981                  __put_user(st.st_dev, &target_st->st_dev);
10982                  __put_user(st.st_ino, &target_st->st_ino);
10983                  __put_user(st.st_mode, &target_st->st_mode);
10984                  __put_user(st.st_uid, &target_st->st_uid);
10985                  __put_user(st.st_gid, &target_st->st_gid);
10986                  __put_user(st.st_nlink, &target_st->st_nlink);
10987                  __put_user(st.st_rdev, &target_st->st_rdev);
10988                  __put_user(st.st_size, &target_st->st_size);
10989                  __put_user(st.st_blksize, &target_st->st_blksize);
10990                  __put_user(st.st_blocks, &target_st->st_blocks);
10991                  __put_user(st.st_atime, &target_st->target_st_atime);
10992                  __put_user(st.st_mtime, &target_st->target_st_mtime);
10993                  __put_user(st.st_ctime, &target_st->target_st_ctime);
10994  #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10995                  __put_user(st.st_atim.tv_nsec,
10996                             &target_st->target_st_atime_nsec);
10997                  __put_user(st.st_mtim.tv_nsec,
10998                             &target_st->target_st_mtime_nsec);
10999                  __put_user(st.st_ctim.tv_nsec,
11000                             &target_st->target_st_ctime_nsec);
11001  #endif
11002                  unlock_user_struct(target_st, arg2, 1);
11003              }
11004          }
11005          return ret;
11006  #endif
11007      case TARGET_NR_vhangup:
11008          return get_errno(vhangup());
11009  #ifdef TARGET_NR_syscall
11010      case TARGET_NR_syscall:
11011          return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11012                            arg6, arg7, arg8, 0);
11013  #endif
11014  #if defined(TARGET_NR_wait4)
11015      case TARGET_NR_wait4:
11016          {
11017              int status;
11018              abi_long status_ptr = arg2;
11019              struct rusage rusage, *rusage_ptr;
11020              abi_ulong target_rusage = arg4;
11021              abi_long rusage_err;
11022              if (target_rusage)
11023                  rusage_ptr = &rusage;
11024              else
11025                  rusage_ptr = NULL;
11026              ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11027              if (!is_error(ret)) {
11028                  if (status_ptr && ret) {
11029                      status = host_to_target_waitstatus(status);
11030                      if (put_user_s32(status, status_ptr))
11031                          return -TARGET_EFAULT;
11032                  }
11033                  if (target_rusage) {
11034                      rusage_err = host_to_target_rusage(target_rusage, &rusage);
11035                      if (rusage_err) {
11036                          ret = rusage_err;
11037                      }
11038                  }
11039              }
11040          }
11041          return ret;
11042  #endif
11043  #ifdef TARGET_NR_swapoff
11044      case TARGET_NR_swapoff:
11045          if (!(p = lock_user_string(arg1)))
11046              return -TARGET_EFAULT;
11047          ret = get_errno(swapoff(p));
11048          unlock_user(p, arg1, 0);
11049          return ret;
11050  #endif
11051      case TARGET_NR_sysinfo:
11052          {
11053              struct target_sysinfo *target_value;
11054              struct sysinfo value;
11055              ret = get_errno(sysinfo(&value));
11056              if (!is_error(ret) && arg1)
11057              {
11058                  if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11059                      return -TARGET_EFAULT;
11060                  __put_user(value.uptime, &target_value->uptime);
11061                  __put_user(value.loads[0], &target_value->loads[0]);
11062                  __put_user(value.loads[1], &target_value->loads[1]);
11063                  __put_user(value.loads[2], &target_value->loads[2]);
11064                  __put_user(value.totalram, &target_value->totalram);
11065                  __put_user(value.freeram, &target_value->freeram);
11066                  __put_user(value.sharedram, &target_value->sharedram);
11067                  __put_user(value.bufferram, &target_value->bufferram);
11068                  __put_user(value.totalswap, &target_value->totalswap);
11069                  __put_user(value.freeswap, &target_value->freeswap);
11070                  __put_user(value.procs, &target_value->procs);
11071                  __put_user(value.totalhigh, &target_value->totalhigh);
11072                  __put_user(value.freehigh, &target_value->freehigh);
11073                  __put_user(value.mem_unit, &target_value->mem_unit);
11074                  unlock_user_struct(target_value, arg1, 1);
11075              }
11076          }
11077          return ret;
11078  #ifdef TARGET_NR_ipc
11079      case TARGET_NR_ipc:
11080          return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11081  #endif
11082  #ifdef TARGET_NR_semget
11083      case TARGET_NR_semget:
11084          return get_errno(semget(arg1, arg2, arg3));
11085  #endif
11086  #ifdef TARGET_NR_semop
11087      case TARGET_NR_semop:
11088          return do_semtimedop(arg1, arg2, arg3, 0, false);
11089  #endif
11090  #ifdef TARGET_NR_semtimedop
11091      case TARGET_NR_semtimedop:
11092          return do_semtimedop(arg1, arg2, arg3, arg4, false);
11093  #endif
11094  #ifdef TARGET_NR_semtimedop_time64
11095      case TARGET_NR_semtimedop_time64:
11096          return do_semtimedop(arg1, arg2, arg3, arg4, true);
11097  #endif
11098  #ifdef TARGET_NR_semctl
11099      case TARGET_NR_semctl:
11100          return do_semctl(arg1, arg2, arg3, arg4);
11101  #endif
11102  #ifdef TARGET_NR_msgctl
11103      case TARGET_NR_msgctl:
11104          return do_msgctl(arg1, arg2, arg3);
11105  #endif
11106  #ifdef TARGET_NR_msgget
11107      case TARGET_NR_msgget:
11108          return get_errno(msgget(arg1, arg2));
11109  #endif
11110  #ifdef TARGET_NR_msgrcv
11111      case TARGET_NR_msgrcv:
11112          return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11113  #endif
11114  #ifdef TARGET_NR_msgsnd
11115      case TARGET_NR_msgsnd:
11116          return do_msgsnd(arg1, arg2, arg3, arg4);
11117  #endif
11118  #ifdef TARGET_NR_shmget
11119      case TARGET_NR_shmget:
11120          return get_errno(shmget(arg1, arg2, arg3));
11121  #endif
11122  #ifdef TARGET_NR_shmctl
11123      case TARGET_NR_shmctl:
11124          return do_shmctl(arg1, arg2, arg3);
11125  #endif
11126  #ifdef TARGET_NR_shmat
11127      case TARGET_NR_shmat:
11128          return target_shmat(cpu_env, arg1, arg2, arg3);
11129  #endif
11130  #ifdef TARGET_NR_shmdt
11131      case TARGET_NR_shmdt:
11132          return target_shmdt(arg1);
11133  #endif
11134      case TARGET_NR_fsync:
11135          return get_errno(fsync(arg1));
11136      case TARGET_NR_clone:
11137          /* Linux manages to have three different orderings for its
11138           * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11139           * match the kernel's CONFIG_CLONE_* settings.
11140           * Microblaze is further special in that it uses a sixth
11141           * implicit argument to clone for the TLS pointer.
11142           */
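        /*
         * Roughly: the default order is (flags, newsp, parent_tidptr,
         * child_tidptr, tls); CLONE_BACKWARDS swaps tls and child_tidptr,
         * while CLONE_BACKWARDS2 instead swaps flags and newsp.
         */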
11143  #if defined(TARGET_MICROBLAZE)
11144          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11145  #elif defined(TARGET_CLONE_BACKWARDS)
11146          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11147  #elif defined(TARGET_CLONE_BACKWARDS2)
11148          ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11149  #else
11150          ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11151  #endif
11152          return ret;
11153  #ifdef __NR_exit_group
11154          /* new thread calls */
11155      case TARGET_NR_exit_group:
11156          preexit_cleanup(cpu_env, arg1);
11157          return get_errno(exit_group(arg1));
11158  #endif
11159      case TARGET_NR_setdomainname:
11160          if (!(p = lock_user_string(arg1)))
11161              return -TARGET_EFAULT;
11162          ret = get_errno(setdomainname(p, arg2));
11163          unlock_user(p, arg1, 0);
11164          return ret;
11165      case TARGET_NR_uname:
11166          /* no need to transcode because we use the linux syscall */
11167          {
11168              struct new_utsname * buf;
11169  
11170              if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11171                  return -TARGET_EFAULT;
11172              ret = get_errno(sys_uname(buf));
11173              if (!is_error(ret)) {
11174                  /* Overwrite the native machine name with whatever is being
11175                     emulated. */
11176                  g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11177                            sizeof(buf->machine));
11178                  /* Allow the user to override the reported release.  */
11179                  if (qemu_uname_release && *qemu_uname_release) {
11180                      g_strlcpy(buf->release, qemu_uname_release,
11181                                sizeof(buf->release));
11182                  }
11183              }
11184              unlock_user_struct(buf, arg1, 1);
11185          }
11186          return ret;
11187  #ifdef TARGET_I386
11188      case TARGET_NR_modify_ldt:
11189          return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11190  #if !defined(TARGET_X86_64)
11191      case TARGET_NR_vm86:
11192          return do_vm86(cpu_env, arg1, arg2);
11193  #endif
11194  #endif
11195  #if defined(TARGET_NR_adjtimex)
11196      case TARGET_NR_adjtimex:
11197          {
11198              struct timex host_buf;
11199  
11200              if (target_to_host_timex(&host_buf, arg1) != 0) {
11201                  return -TARGET_EFAULT;
11202              }
11203              ret = get_errno(adjtimex(&host_buf));
11204              if (!is_error(ret)) {
11205                  if (host_to_target_timex(arg1, &host_buf) != 0) {
11206                      return -TARGET_EFAULT;
11207                  }
11208              }
11209          }
11210          return ret;
11211  #endif
11212  #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11213      case TARGET_NR_clock_adjtime:
11214          {
11215              struct timex htx;
11216  
11217              if (target_to_host_timex(&htx, arg2) != 0) {
11218                  return -TARGET_EFAULT;
11219              }
11220              ret = get_errno(clock_adjtime(arg1, &htx));
11221              if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11222                  return -TARGET_EFAULT;
11223              }
11224          }
11225          return ret;
11226  #endif
11227  #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11228      case TARGET_NR_clock_adjtime64:
11229          {
11230              struct timex htx;
11231  
11232              if (target_to_host_timex64(&htx, arg2) != 0) {
11233                  return -TARGET_EFAULT;
11234              }
11235              ret = get_errno(clock_adjtime(arg1, &htx));
11236              if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11237                  return -TARGET_EFAULT;
11238              }
11239          }
11240          return ret;
11241  #endif
11242      case TARGET_NR_getpgid:
11243          return get_errno(getpgid(arg1));
11244      case TARGET_NR_fchdir:
11245          return get_errno(fchdir(arg1));
11246      case TARGET_NR_personality:
11247          return get_errno(personality(arg1));
11248  #ifdef TARGET_NR__llseek /* Not on alpha */
11249      case TARGET_NR__llseek:
11250          {
11251              int64_t res;
11252  #if !defined(__NR_llseek)
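            /*
             * Hosts without a separate llseek syscall (typically 64-bit
             * hosts) can seek directly: rebuild the 64-bit offset from the
             * high (arg2) and low (arg3) 32-bit halves.
             */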
11253              res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11254              if (res == -1) {
11255                  ret = get_errno(res);
11256              } else {
11257                  ret = 0;
11258              }
11259  #else
11260              ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11261  #endif
11262              if ((ret == 0) && put_user_s64(res, arg4)) {
11263                  return -TARGET_EFAULT;
11264              }
11265          }
11266          return ret;
11267  #endif
11268  #ifdef TARGET_NR_getdents
11269      case TARGET_NR_getdents:
11270          return do_getdents(arg1, arg2, arg3);
11271  #endif /* TARGET_NR_getdents */
11272  #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11273      case TARGET_NR_getdents64:
11274          return do_getdents64(arg1, arg2, arg3);
11275  #endif /* TARGET_NR_getdents64 */
11276  #if defined(TARGET_NR__newselect)
11277      case TARGET_NR__newselect:
11278          return do_select(arg1, arg2, arg3, arg4, arg5);
11279  #endif
11280  #ifdef TARGET_NR_poll
11281      case TARGET_NR_poll:
11282          return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11283  #endif
11284  #ifdef TARGET_NR_ppoll
11285      case TARGET_NR_ppoll:
11286          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11287  #endif
11288  #ifdef TARGET_NR_ppoll_time64
11289      case TARGET_NR_ppoll_time64:
11290          return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11291  #endif
11292      case TARGET_NR_flock:
11293          /* NOTE: the flock constant seems to be the same for every
11294             Linux platform */
11295          return get_errno(safe_flock(arg1, arg2));
11296      case TARGET_NR_readv:
11297          {
11298              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11299              if (vec != NULL) {
11300                  ret = get_errno(safe_readv(arg1, vec, arg3));
11301                  unlock_iovec(vec, arg2, arg3, 1);
11302              } else {
11303                  ret = -host_to_target_errno(errno);
11304              }
11305          }
11306          return ret;
11307      case TARGET_NR_writev:
11308          {
11309              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11310              if (vec != NULL) {
11311                  ret = get_errno(safe_writev(arg1, vec, arg3));
11312                  unlock_iovec(vec, arg2, arg3, 0);
11313              } else {
11314                  ret = -host_to_target_errno(errno);
11315              }
11316          }
11317          return ret;
11318  #if defined(TARGET_NR_preadv)
11319      case TARGET_NR_preadv:
11320          {
11321              struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11322              if (vec != NULL) {
11323                  unsigned long low, high;
11324  
11325                  target_to_host_low_high(arg4, arg5, &low, &high);
11326                  ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11327                  unlock_iovec(vec, arg2, arg3, 1);
11328              } else {
11329                  ret = -host_to_target_errno(errno);
11330              }
11331          }
11332          return ret;
11333  #endif
11334  #if defined(TARGET_NR_pwritev)
11335      case TARGET_NR_pwritev:
11336          {
11337              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11338              if (vec != NULL) {
11339                  unsigned long low, high;
11340  
11341                  target_to_host_low_high(arg4, arg5, &low, &high);
11342                  ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11343                  unlock_iovec(vec, arg2, arg3, 0);
11344              } else {
11345                  ret = -host_to_target_errno(errno);
11346              }
11347          }
11348          return ret;
11349  #endif
11350      case TARGET_NR_getsid:
11351          return get_errno(getsid(arg1));
11352  #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11353      case TARGET_NR_fdatasync:
11354          return get_errno(fdatasync(arg1));
11355  #endif
11356      case TARGET_NR_sched_getaffinity:
11357          {
11358              unsigned int mask_size;
11359              unsigned long *mask;
11360  
11361              /*
11362               * sched_getaffinity needs multiples of ulong, so we need to
11363               * take care of mismatches between target ulong and host ulong sizes.
11364               */
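            /*
             * E.g. a 32-bit guest on a 64-bit host may pass arg2 == 4; the
             * rounding below turns that into mask_size == 8 so the host
             * syscall always sees whole unsigned longs.
             */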
11365              if (arg2 & (sizeof(abi_ulong) - 1)) {
11366                  return -TARGET_EINVAL;
11367              }
11368              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11369  
11370              mask = alloca(mask_size);
11371              memset(mask, 0, mask_size);
11372              ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11373  
11374              if (!is_error(ret)) {
11375                  if (ret > arg2) {
11376                      /* More data returned than the caller's buffer will fit.
11377                       * This only happens if sizeof(abi_long) < sizeof(long)
11378                       * and the caller passed us a buffer holding an odd number
11379                       * of abi_longs. If the host kernel is actually using the
11380                       * extra 4 bytes then fail EINVAL; otherwise we can just
11381                       * ignore them and only copy the interesting part.
11382                       */
11383                      int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11384                      if (numcpus > arg2 * 8) {
11385                          return -TARGET_EINVAL;
11386                      }
11387                      ret = arg2;
11388                  }
11389  
11390                  if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11391                      return -TARGET_EFAULT;
11392                  }
11393              }
11394          }
11395          return ret;
11396      case TARGET_NR_sched_setaffinity:
11397          {
11398              unsigned int mask_size;
11399              unsigned long *mask;
11400  
11401              /*
11402               * sched_setaffinity needs multiples of ulong, so we need to
11403               * take care of mismatches between target ulong and host ulong sizes.
11404               */
11405              if (arg2 & (sizeof(abi_ulong) - 1)) {
11406                  return -TARGET_EINVAL;
11407              }
11408              mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11409              mask = alloca(mask_size);
11410  
11411              ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11412              if (ret) {
11413                  return ret;
11414              }
11415  
11416              return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11417          }
11418      case TARGET_NR_getcpu:
11419          {
11420              unsigned cpuid, node;
11421              ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11422                                         arg2 ? &node : NULL,
11423                                         NULL));
11424              if (is_error(ret)) {
11425                  return ret;
11426              }
11427              if (arg1 && put_user_u32(cpuid, arg1)) {
11428                  return -TARGET_EFAULT;
11429              }
11430              if (arg2 && put_user_u32(node, arg2)) {
11431                  return -TARGET_EFAULT;
11432              }
11433          }
11434          return ret;
11435      case TARGET_NR_sched_setparam:
11436          {
11437              struct target_sched_param *target_schp;
11438              struct sched_param schp;
11439  
11440              if (arg2 == 0) {
11441                  return -TARGET_EINVAL;
11442              }
11443              if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11444                  return -TARGET_EFAULT;
11445              }
11446              schp.sched_priority = tswap32(target_schp->sched_priority);
11447              unlock_user_struct(target_schp, arg2, 0);
11448              return get_errno(sys_sched_setparam(arg1, &schp));
11449          }
11450      case TARGET_NR_sched_getparam:
11451          {
11452              struct target_sched_param *target_schp;
11453              struct sched_param schp;
11454  
11455              if (arg2 == 0) {
11456                  return -TARGET_EINVAL;
11457              }
11458              ret = get_errno(sys_sched_getparam(arg1, &schp));
11459              if (!is_error(ret)) {
11460                  if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11461                      return -TARGET_EFAULT;
11462                  }
11463                  target_schp->sched_priority = tswap32(schp.sched_priority);
11464                  unlock_user_struct(target_schp, arg2, 1);
11465              }
11466          }
11467          return ret;
11468      case TARGET_NR_sched_setscheduler:
11469          {
11470              struct target_sched_param *target_schp;
11471              struct sched_param schp;
11472              if (arg3 == 0) {
11473                  return -TARGET_EINVAL;
11474              }
11475              if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11476                  return -TARGET_EFAULT;
11477              }
11478              schp.sched_priority = tswap32(target_schp->sched_priority);
11479              unlock_user_struct(target_schp, arg3, 0);
11480              return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11481          }
11482      case TARGET_NR_sched_getscheduler:
11483          return get_errno(sys_sched_getscheduler(arg1));
11484      case TARGET_NR_sched_getattr:
11485          {
11486              struct target_sched_attr *target_scha;
11487              struct sched_attr scha;
11488              if (arg2 == 0) {
11489                  return -TARGET_EINVAL;
11490              }
11491              if (arg3 > sizeof(scha)) {
11492                  arg3 = sizeof(scha);
11493              }
11494              ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11495              if (!is_error(ret)) {
11496                  target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11497                  if (!target_scha) {
11498                      return -TARGET_EFAULT;
11499                  }
11500                  target_scha->size = tswap32(scha.size);
11501                  target_scha->sched_policy = tswap32(scha.sched_policy);
11502                  target_scha->sched_flags = tswap64(scha.sched_flags);
11503                  target_scha->sched_nice = tswap32(scha.sched_nice);
11504                  target_scha->sched_priority = tswap32(scha.sched_priority);
11505                  target_scha->sched_runtime = tswap64(scha.sched_runtime);
11506                  target_scha->sched_deadline = tswap64(scha.sched_deadline);
11507                  target_scha->sched_period = tswap64(scha.sched_period);
11508                  if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11509                      target_scha->sched_util_min = tswap32(scha.sched_util_min);
11510                      target_scha->sched_util_max = tswap32(scha.sched_util_max);
11511                  }
11512                  unlock_user(target_scha, arg2, arg3);
11513              }
11514              return ret;
11515          }
11516      case TARGET_NR_sched_setattr:
11517          {
11518              struct target_sched_attr *target_scha;
11519              struct sched_attr scha;
11520              uint32_t size;
11521              int zeroed;
11522              if (arg2 == 0) {
11523                  return -TARGET_EINVAL;
11524              }
11525              if (get_user_u32(size, arg2)) {
11526                  return -TARGET_EFAULT;
11527              }
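                   /*
                    * Mirror the kernel's sched_attr size handling: size 0 means
                    * the original struct layout, anything smaller is rejected
                    * with E2BIG (after writing back the size we expect), and any
                    * trailing bytes beyond what we understand must be zero.
                    */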
11528              if (!size) {
11529                  size = offsetof(struct target_sched_attr, sched_util_min);
11530              }
11531              if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11532                  if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11533                      return -TARGET_EFAULT;
11534                  }
11535                  return -TARGET_E2BIG;
11536              }
11537  
11538              zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11539              if (zeroed < 0) {
11540                  return zeroed;
11541              } else if (zeroed == 0) {
11542                  if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11543                      return -TARGET_EFAULT;
11544                  }
11545                  return -TARGET_E2BIG;
11546              }
11547              if (size > sizeof(struct target_sched_attr)) {
11548                  size = sizeof(struct target_sched_attr);
11549              }
11550  
11551              target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11552              if (!target_scha) {
11553                  return -TARGET_EFAULT;
11554              }
11555              scha.size = size;
11556              scha.sched_policy = tswap32(target_scha->sched_policy);
11557              scha.sched_flags = tswap64(target_scha->sched_flags);
11558              scha.sched_nice = tswap32(target_scha->sched_nice);
11559              scha.sched_priority = tswap32(target_scha->sched_priority);
11560              scha.sched_runtime = tswap64(target_scha->sched_runtime);
11561              scha.sched_deadline = tswap64(target_scha->sched_deadline);
11562              scha.sched_period = tswap64(target_scha->sched_period);
11563              if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11564                  scha.sched_util_min = tswap32(target_scha->sched_util_min);
11565                  scha.sched_util_max = tswap32(target_scha->sched_util_max);
11566              }
11567              unlock_user(target_scha, arg2, 0);
11568              return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11569          }
11570      case TARGET_NR_sched_yield:
11571          return get_errno(sched_yield());
11572      case TARGET_NR_sched_get_priority_max:
11573          return get_errno(sched_get_priority_max(arg1));
11574      case TARGET_NR_sched_get_priority_min:
11575          return get_errno(sched_get_priority_min(arg1));
11576  #ifdef TARGET_NR_sched_rr_get_interval
11577      case TARGET_NR_sched_rr_get_interval:
11578          {
11579              struct timespec ts;
11580              ret = get_errno(sched_rr_get_interval(arg1, &ts));
11581              if (!is_error(ret)) {
11582                  ret = host_to_target_timespec(arg2, &ts);
11583              }
11584          }
11585          return ret;
11586  #endif
11587  #ifdef TARGET_NR_sched_rr_get_interval_time64
11588      case TARGET_NR_sched_rr_get_interval_time64:
11589          {
11590              struct timespec ts;
11591              ret = get_errno(sched_rr_get_interval(arg1, &ts));
11592              if (!is_error(ret)) {
11593                  ret = host_to_target_timespec64(arg2, &ts);
11594              }
11595          }
11596          return ret;
11597  #endif
11598  #if defined(TARGET_NR_nanosleep)
11599      case TARGET_NR_nanosleep:
11600          {
11601              struct timespec req, rem;
11602              if (target_to_host_timespec(&req, arg1)) {
                       return -TARGET_EFAULT;
                   }
11603              ret = get_errno(safe_nanosleep(&req, &rem));
11604              if (is_error(ret) && arg2) {
11605                  host_to_target_timespec(arg2, &rem);
11606              }
11607          }
11608          return ret;
11609  #endif
11610      case TARGET_NR_prctl:
11611          return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11613  #ifdef TARGET_NR_arch_prctl
11614      case TARGET_NR_arch_prctl:
11615          return do_arch_prctl(cpu_env, arg1, arg2);
11616  #endif
11617  #ifdef TARGET_NR_pread64
11618      case TARGET_NR_pread64:
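               /*
                * On 32-bit ABIs that pass 64-bit values in aligned register
                * pairs, the offset starts one register later, so shift it down.
                */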
11619          if (regpairs_aligned(cpu_env, num)) {
11620              arg4 = arg5;
11621              arg5 = arg6;
11622          }
11623          if (arg2 == 0 && arg3 == 0) {
11624              /* Special-case NULL buffer and zero length, which should succeed */
11625              p = 0;
11626          } else {
11627              p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11628              if (!p) {
11629                  return -TARGET_EFAULT;
11630              }
11631          }
11632          ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11633          unlock_user(p, arg2, ret);
11634          return ret;
11635      case TARGET_NR_pwrite64:
11636          if (regpairs_aligned(cpu_env, num)) {
11637              arg4 = arg5;
11638              arg5 = arg6;
11639          }
11640          if (arg2 == 0 && arg3 == 0) {
11641              /* Special-case NULL buffer and zero length, which should succeed */
11642              p = 0;
11643          } else {
11644              p = lock_user(VERIFY_READ, arg2, arg3, 1);
11645              if (!p) {
11646                  return -TARGET_EFAULT;
11647              }
11648          }
11649          ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11650          unlock_user(p, arg2, 0);
11651          return ret;
11652  #endif
11653      case TARGET_NR_getcwd:
11654          if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11655              return -TARGET_EFAULT;
11656          ret = get_errno(sys_getcwd1(p, arg2));
11657          unlock_user(p, arg1, ret);
11658          return ret;
11659      case TARGET_NR_capget:
11660      case TARGET_NR_capset:
11661      {
11662          struct target_user_cap_header *target_header;
11663          struct target_user_cap_data *target_data = NULL;
11664          struct __user_cap_header_struct header;
11665          struct __user_cap_data_struct data[2];
11666          struct __user_cap_data_struct *dataptr = NULL;
11667          int i, target_datalen;
11668          int data_items = 1;
11669  
11670          if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11671              return -TARGET_EFAULT;
11672          }
11673          header.version = tswap32(target_header->version);
11674          header.pid = tswap32(target_header->pid);
11675  
11676          if (header.version != _LINUX_CAPABILITY_VERSION) {
11677              /* Versions 2 and up take a pointer to two user_data structs */
11678              data_items = 2;
11679          }
11680  
11681          target_datalen = sizeof(*target_data) * data_items;
11682  
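               /*
                * arg2 (datap) may be NULL: capget() with a NULL data pointer is
                * the documented way to probe the kernel's preferred capability
                * version.
                */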
11683          if (arg2) {
11684              if (num == TARGET_NR_capget) {
11685                  target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11686              } else {
11687                  target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11688              }
11689              if (!target_data) {
11690                  unlock_user_struct(target_header, arg1, 0);
11691                  return -TARGET_EFAULT;
11692              }
11693  
11694              if (num == TARGET_NR_capset) {
11695                  for (i = 0; i < data_items; i++) {
11696                      data[i].effective = tswap32(target_data[i].effective);
11697                      data[i].permitted = tswap32(target_data[i].permitted);
11698                      data[i].inheritable = tswap32(target_data[i].inheritable);
11699                  }
11700              }
11701  
11702              dataptr = data;
11703          }
11704  
11705          if (num == TARGET_NR_capget) {
11706              ret = get_errno(capget(&header, dataptr));
11707          } else {
11708              ret = get_errno(capset(&header, dataptr));
11709          }
11710  
11711          /* The kernel always updates version for both capget and capset */
11712          target_header->version = tswap32(header.version);
11713          unlock_user_struct(target_header, arg1, 1);
11714  
11715          if (arg2) {
11716              if (num == TARGET_NR_capget) {
11717                  for (i = 0; i < data_items; i++) {
11718                      target_data[i].effective = tswap32(data[i].effective);
11719                      target_data[i].permitted = tswap32(data[i].permitted);
11720                      target_data[i].inheritable = tswap32(data[i].inheritable);
11721                  }
11722                  unlock_user(target_data, arg2, target_datalen);
11723              } else {
11724                  unlock_user(target_data, arg2, 0);
11725              }
11726          }
11727          return ret;
11728      }
11729      case TARGET_NR_sigaltstack:
11730          return do_sigaltstack(arg1, arg2, cpu_env);
11731  
11732  #ifdef CONFIG_SENDFILE
11733  #ifdef TARGET_NR_sendfile
11734      case TARGET_NR_sendfile:
11735      {
11736          off_t *offp = NULL;
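               /*
                * The offset is read and written as a target abi_long here
                * (get_user_sal/put_user_sal); TARGET_NR_sendfile64 below uses a
                * full 64-bit offset.
                */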
11737          off_t off;
11738          if (arg3) {
11739              ret = get_user_sal(off, arg3);
11740              if (is_error(ret)) {
11741                  return ret;
11742              }
11743              offp = &off;
11744          }
11745          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11746          if (!is_error(ret) && arg3) {
11747              abi_long ret2 = put_user_sal(off, arg3);
11748              if (is_error(ret2)) {
11749                  ret = ret2;
11750              }
11751          }
11752          return ret;
11753      }
11754  #endif
11755  #ifdef TARGET_NR_sendfile64
11756      case TARGET_NR_sendfile64:
11757      {
11758          off_t *offp = NULL;
11759          off_t off;
11760          if (arg3) {
11761              ret = get_user_s64(off, arg3);
11762              if (is_error(ret)) {
11763                  return ret;
11764              }
11765              offp = &off;
11766          }
11767          ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11768          if (!is_error(ret) && arg3) {
11769              abi_long ret2 = put_user_s64(off, arg3);
11770              if (is_error(ret2)) {
11771                  ret = ret2;
11772              }
11773          }
11774          return ret;
11775      }
11776  #endif
11777  #endif
11778  #ifdef TARGET_NR_vfork
11779      case TARGET_NR_vfork:
11780          return get_errno(do_fork(cpu_env,
11781                           CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11782                           0, 0, 0, 0));
11783  #endif
11784  #ifdef TARGET_NR_ugetrlimit
11785      case TARGET_NR_ugetrlimit:
11786      {
11787          struct rlimit rlim;
11788          int resource = target_to_host_resource(arg1);
11789          ret = get_errno(getrlimit(resource, &rlim));
11790          if (!is_error(ret)) {
11791              struct target_rlimit *target_rlim;
11792              if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11793                  return -TARGET_EFAULT;
11794              target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11795              target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11796              unlock_user_struct(target_rlim, arg2, 1);
11797          }
11798          return ret;
11799      }
11800  #endif
11801  #ifdef TARGET_NR_truncate64
11802      case TARGET_NR_truncate64:
11803          if (!(p = lock_user_string(arg1)))
11804              return -TARGET_EFAULT;
11805          ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11806          unlock_user(p, arg1, 0);
11807          return ret;
11808  #endif
11809  #ifdef TARGET_NR_ftruncate64
11810      case TARGET_NR_ftruncate64:
11811          return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11812  #endif
11813  #ifdef TARGET_NR_stat64
11814      case TARGET_NR_stat64:
11815          if (!(p = lock_user_string(arg1))) {
11816              return -TARGET_EFAULT;
11817          }
11818          ret = get_errno(stat(path(p), &st));
11819          unlock_user(p, arg1, 0);
11820          if (!is_error(ret))
11821              ret = host_to_target_stat64(cpu_env, arg2, &st);
11822          return ret;
11823  #endif
11824  #ifdef TARGET_NR_lstat64
11825      case TARGET_NR_lstat64:
11826          if (!(p = lock_user_string(arg1))) {
11827              return -TARGET_EFAULT;
11828          }
11829          ret = get_errno(lstat(path(p), &st));
11830          unlock_user(p, arg1, 0);
11831          if (!is_error(ret))
11832              ret = host_to_target_stat64(cpu_env, arg2, &st);
11833          return ret;
11834  #endif
11835  #ifdef TARGET_NR_fstat64
11836      case TARGET_NR_fstat64:
11837          ret = get_errno(fstat(arg1, &st));
11838          if (!is_error(ret))
11839              ret = host_to_target_stat64(cpu_env, arg2, &st);
11840          return ret;
11841  #endif
11842  #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11843  #ifdef TARGET_NR_fstatat64
11844      case TARGET_NR_fstatat64:
11845  #endif
11846  #ifdef TARGET_NR_newfstatat
11847      case TARGET_NR_newfstatat:
11848  #endif
11849          if (!(p = lock_user_string(arg2))) {
11850              return -TARGET_EFAULT;
11851          }
11852          ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11853          unlock_user(p, arg2, 0);
11854          if (!is_error(ret))
11855              ret = host_to_target_stat64(cpu_env, arg3, &st);
11856          return ret;
11857  #endif
11858  #if defined(TARGET_NR_statx)
11859      case TARGET_NR_statx:
11860          {
11861              struct target_statx *target_stx;
11862              int dirfd = arg1;
11863              int flags = arg3;
11864  
11865              p = lock_user_string(arg2);
11866              if (p == NULL) {
11867                  return -TARGET_EFAULT;
11868              }
11869  #if defined(__NR_statx)
11870              {
11871                  /*
11872                   * It is assumed that struct statx is architecture independent.
11873                   */
11874                  struct target_statx host_stx;
11875                  int mask = arg4;
11876  
11877                  ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11878                  if (!is_error(ret)) {
11879                      if (host_to_target_statx(&host_stx, arg5) != 0) {
11880                          unlock_user(p, arg2, 0);
11881                          return -TARGET_EFAULT;
11882                      }
11883                  }
11884  
11885                  if (ret != -TARGET_ENOSYS) {
11886                      unlock_user(p, arg2, 0);
11887                      return ret;
11888                  }
11889              }
11890  #endif
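                   /*
                    * No host statx() (or it returned ENOSYS): fall back to
                    * fstatat() and fill in the statx fields we can recover
                    * from struct stat.
                    */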
11891              ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11892              unlock_user(p, arg2, 0);
11893  
11894              if (!is_error(ret)) {
11895                  if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11896                      return -TARGET_EFAULT;
11897                  }
11898                  memset(target_stx, 0, sizeof(*target_stx));
11899                  __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11900                  __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11901                  __put_user(st.st_ino, &target_stx->stx_ino);
11902                  __put_user(st.st_mode, &target_stx->stx_mode);
11903                  __put_user(st.st_uid, &target_stx->stx_uid);
11904                  __put_user(st.st_gid, &target_stx->stx_gid);
11905                  __put_user(st.st_nlink, &target_stx->stx_nlink);
11906                  __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11907                  __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11908                  __put_user(st.st_size, &target_stx->stx_size);
11909                  __put_user(st.st_blksize, &target_stx->stx_blksize);
11910                  __put_user(st.st_blocks, &target_stx->stx_blocks);
11911                  __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11912                  __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11913                  __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11914                  unlock_user_struct(target_stx, arg5, 1);
11915              }
11916          }
11917          return ret;
11918  #endif
11919  #ifdef TARGET_NR_lchown
11920      case TARGET_NR_lchown:
11921          if (!(p = lock_user_string(arg1)))
11922              return -TARGET_EFAULT;
11923          ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11924          unlock_user(p, arg1, 0);
11925          return ret;
11926  #endif
11927  #ifdef TARGET_NR_getuid
11928      case TARGET_NR_getuid:
11929          return get_errno(high2lowuid(getuid()));
11930  #endif
11931  #ifdef TARGET_NR_getgid
11932      case TARGET_NR_getgid:
11933          return get_errno(high2lowgid(getgid()));
11934  #endif
11935  #ifdef TARGET_NR_geteuid
11936      case TARGET_NR_geteuid:
11937          return get_errno(high2lowuid(geteuid()));
11938  #endif
11939  #ifdef TARGET_NR_getegid
11940      case TARGET_NR_getegid:
11941          return get_errno(high2lowgid(getegid()));
11942  #endif
11943      case TARGET_NR_setreuid:
11944          return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11945      case TARGET_NR_setregid:
11946          return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11947      case TARGET_NR_getgroups:
11948          { /* the same code as for TARGET_NR_getgroups32 */
11949              int gidsetsize = arg1;
11950              target_id *target_grouplist;
11951              g_autofree gid_t *grouplist = NULL;
11952              int i;
11953  
11954              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11955                  return -TARGET_EINVAL;
11956              }
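                   /* gidsetsize == 0 only queries the number of supplementary
                    * groups, so no buffer is needed. */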
11957              if (gidsetsize > 0) {
11958                  grouplist = g_try_new(gid_t, gidsetsize);
11959                  if (!grouplist) {
11960                      return -TARGET_ENOMEM;
11961                  }
11962              }
11963              ret = get_errno(getgroups(gidsetsize, grouplist));
11964              if (!is_error(ret) && gidsetsize > 0) {
11965                  target_grouplist = lock_user(VERIFY_WRITE, arg2,
11966                                               gidsetsize * sizeof(target_id), 0);
11967                  if (!target_grouplist) {
11968                      return -TARGET_EFAULT;
11969                  }
11970                  for (i = 0; i < ret; i++) {
11971                      target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11972                  }
11973                  unlock_user(target_grouplist, arg2,
11974                              gidsetsize * sizeof(target_id));
11975              }
11976              return ret;
11977          }
11978      case TARGET_NR_setgroups:
11979          { /* the same code as for TARGET_NR_setgroups32 */
11980              int gidsetsize = arg1;
11981              target_id *target_grouplist;
11982              g_autofree gid_t *grouplist = NULL;
11983              int i;
11984  
11985              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11986                  return -TARGET_EINVAL;
11987              }
11988              if (gidsetsize > 0) {
11989                  grouplist = g_try_new(gid_t, gidsetsize);
11990                  if (!grouplist) {
11991                      return -TARGET_ENOMEM;
11992                  }
11993                  target_grouplist = lock_user(VERIFY_READ, arg2,
11994                                               gidsetsize * sizeof(target_id), 1);
11995                  if (!target_grouplist) {
11996                      return -TARGET_EFAULT;
11997                  }
11998                  for (i = 0; i < gidsetsize; i++) {
11999                      grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12000                  }
12001                  unlock_user(target_grouplist, arg2,
12002                              gidsetsize * sizeof(target_id));
12003              }
12004              return get_errno(sys_setgroups(gidsetsize, grouplist));
12005          }
12006      case TARGET_NR_fchown:
12007          return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12008  #if defined(TARGET_NR_fchownat)
12009      case TARGET_NR_fchownat:
12010          if (!(p = lock_user_string(arg2)))
12011              return -TARGET_EFAULT;
12012          ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12013                                   low2highgid(arg4), arg5));
12014          unlock_user(p, arg2, 0);
12015          return ret;
12016  #endif
12017  #ifdef TARGET_NR_setresuid
12018      case TARGET_NR_setresuid:
12019          return get_errno(sys_setresuid(low2highuid(arg1),
12020                                         low2highuid(arg2),
12021                                         low2highuid(arg3)));
12022  #endif
12023  #ifdef TARGET_NR_getresuid
12024      case TARGET_NR_getresuid:
12025          {
12026              uid_t ruid, euid, suid;
12027              ret = get_errno(getresuid(&ruid, &euid, &suid));
12028              if (!is_error(ret)) {
12029                  if (put_user_id(high2lowuid(ruid), arg1)
12030                      || put_user_id(high2lowuid(euid), arg2)
12031                      || put_user_id(high2lowuid(suid), arg3))
12032                      return -TARGET_EFAULT;
12033              }
12034          }
12035          return ret;
12036  #endif
12037  #ifdef TARGET_NR_setresgid
12038      case TARGET_NR_setresgid:
12039          return get_errno(sys_setresgid(low2highgid(arg1),
12040                                         low2highgid(arg2),
12041                                         low2highgid(arg3)));
12042  #endif
12043  #ifdef TARGET_NR_getresgid
12044      case TARGET_NR_getresgid:
12045          {
12046              gid_t rgid, egid, sgid;
12047              ret = get_errno(getresgid(&rgid, &egid, &sgid));
12048              if (!is_error(ret)) {
12049                  if (put_user_id(high2lowgid(rgid), arg1)
12050                      || put_user_id(high2lowgid(egid), arg2)
12051                      || put_user_id(high2lowgid(sgid), arg3))
12052                      return -TARGET_EFAULT;
12053              }
12054          }
12055          return ret;
12056  #endif
12057  #ifdef TARGET_NR_chown
12058      case TARGET_NR_chown:
12059          if (!(p = lock_user_string(arg1)))
12060              return -TARGET_EFAULT;
12061          ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12062          unlock_user(p, arg1, 0);
12063          return ret;
12064  #endif
12065      case TARGET_NR_setuid:
12066          return get_errno(sys_setuid(low2highuid(arg1)));
12067      case TARGET_NR_setgid:
12068          return get_errno(sys_setgid(low2highgid(arg1)));
12069      case TARGET_NR_setfsuid:
12070          return get_errno(setfsuid(arg1));
12071      case TARGET_NR_setfsgid:
12072          return get_errno(setfsgid(arg1));
12073  
12074  #ifdef TARGET_NR_lchown32
12075      case TARGET_NR_lchown32:
12076          if (!(p = lock_user_string(arg1)))
12077              return -TARGET_EFAULT;
12078          ret = get_errno(lchown(p, arg2, arg3));
12079          unlock_user(p, arg1, 0);
12080          return ret;
12081  #endif
12082  #ifdef TARGET_NR_getuid32
12083      case TARGET_NR_getuid32:
12084          return get_errno(getuid());
12085  #endif
12086  
12087  #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12088      /* Alpha specific */
12089      case TARGET_NR_getxuid:
12090          {
12091              uid_t euid;
12092              euid = geteuid();
12093              cpu_env->ir[IR_A4] = euid;
12094          }
12095          return get_errno(getuid());
12096  #endif
12097  #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12098      /* Alpha specific */
12099      case TARGET_NR_getxgid:
12100          {
12101              gid_t egid;
12102              egid = getegid();
12103              cpu_env->ir[IR_A4] = egid;
12104          }
12105          return get_errno(getgid());
12106  #endif
12107  #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12108      /* Alpha specific */
12109      case TARGET_NR_osf_getsysinfo:
12110          ret = -TARGET_EOPNOTSUPP;
12111          switch (arg1) {
12112            case TARGET_GSI_IEEE_FP_CONTROL:
12113              {
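                       /*
                        * Compose the IEEE software control word: keep the stored
                        * trap-enable/map bits and refresh the sticky status bits
                        * from the live hardware FPCR.
                        */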
12114                  uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12115                  uint64_t swcr = cpu_env->swcr;
12116  
12117                  swcr &= ~SWCR_STATUS_MASK;
12118                  swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12119  
12120                  if (put_user_u64(swcr, arg2))
12121                      return -TARGET_EFAULT;
12122                  ret = 0;
12123              }
12124              break;
12125  
12126            /* case GSI_IEEE_STATE_AT_SIGNAL:
12127               -- Not implemented in linux kernel.
12128               case GSI_UACPROC:
12129               -- Retrieves current unaligned access state; not much used.
12130               case GSI_PROC_TYPE:
12131               -- Retrieves implver information; surely not used.
12132               case GSI_GET_HWRPB:
12133               -- Grabs a copy of the HWRPB; surely not used.
12134            */
12135          }
12136          return ret;
12137  #endif
12138  #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12139      /* Alpha specific */
12140      case TARGET_NR_osf_setsysinfo:
12141          ret = -TARGET_EOPNOTSUPP;
12142          switch (arg1) {
12143            case TARGET_SSI_IEEE_FP_CONTROL:
12144              {
12145                  uint64_t swcr, fpcr;
12146  
12147                  if (get_user_u64(swcr, arg2)) {
12148                      return -TARGET_EFAULT;
12149                  }
12150  
12151                  /*
12152                   * The kernel calls swcr_update_status to update the
12153                   * status bits from the fpcr at every point that it
12154                   * could be queried.  Therefore, we store the status
12155                   * bits only in FPCR.
12156                   */
12157                  cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12158  
12159                  fpcr = cpu_alpha_load_fpcr(cpu_env);
12160                  fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12161                  fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12162                  cpu_alpha_store_fpcr(cpu_env, fpcr);
12163                  ret = 0;
12164              }
12165              break;
12166  
12167            case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12168              {
12169                  uint64_t exc, fpcr, fex;
12170  
12171                  if (get_user_u64(exc, arg2)) {
12172                      return -TARGET_EFAULT;
12173                  }
12174                  exc &= SWCR_STATUS_MASK;
12175                  fpcr = cpu_alpha_load_fpcr(cpu_env);
12176  
12177                  /* Old exceptions are not signaled.  */
12178                  fex = alpha_ieee_fpcr_to_swcr(fpcr);
12179                  fex = exc & ~fex;
12180                  fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12181                  fex &= (cpu_env)->swcr;
12182  
12183                  /* Update the hardware fpcr.  */
12184                  fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12185                  cpu_alpha_store_fpcr(cpu_env, fpcr);
12186  
12187                  if (fex) {
12188                      int si_code = TARGET_FPE_FLTUNK;
12189                      target_siginfo_t info;
12190  
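                           /*
                            * Map the enabled exceptions to an si_code; when
                            * several are raised at once, the later checks below
                            * take precedence.
                            */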
12191                      if (fex & SWCR_TRAP_ENABLE_DNO) {
12192                          si_code = TARGET_FPE_FLTUND;
12193                      }
12194                      if (fex & SWCR_TRAP_ENABLE_INE) {
12195                          si_code = TARGET_FPE_FLTRES;
12196                      }
12197                      if (fex & SWCR_TRAP_ENABLE_UNF) {
12198                          si_code = TARGET_FPE_FLTUND;
12199                      }
12200                      if (fex & SWCR_TRAP_ENABLE_OVF) {
12201                          si_code = TARGET_FPE_FLTOVF;
12202                      }
12203                      if (fex & SWCR_TRAP_ENABLE_DZE) {
12204                          si_code = TARGET_FPE_FLTDIV;
12205                      }
12206                      if (fex & SWCR_TRAP_ENABLE_INV) {
12207                          si_code = TARGET_FPE_FLTINV;
12208                      }
12209  
12210                      info.si_signo = SIGFPE;
12211                      info.si_errno = 0;
12212                      info.si_code = si_code;
12213                      info._sifields._sigfault._addr = (cpu_env)->pc;
12214                      queue_signal(cpu_env, info.si_signo,
12215                                   QEMU_SI_FAULT, &info);
12216                  }
12217                  ret = 0;
12218              }
12219              break;
12220  
12221            /* case SSI_NVPAIRS:
12222               -- Used with SSIN_UACPROC to enable unaligned accesses.
12223               case SSI_IEEE_STATE_AT_SIGNAL:
12224               case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12225               -- Not implemented in linux kernel
12226            */
12227          }
12228          return ret;
12229  #endif
12230  #ifdef TARGET_NR_osf_sigprocmask
12231      /* Alpha specific.  */
12232      case TARGET_NR_osf_sigprocmask:
12233          {
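                   /*
                    * The legacy OSF/1 sigprocmask takes the mask by value and
                    * returns the previous mask as the syscall result rather than
                    * through a pointer.
                    */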
12234              abi_ulong mask;
12235              int how;
12236              sigset_t set, oldset;
12237  
12238              switch(arg1) {
12239              case TARGET_SIG_BLOCK:
12240                  how = SIG_BLOCK;
12241                  break;
12242              case TARGET_SIG_UNBLOCK:
12243                  how = SIG_UNBLOCK;
12244                  break;
12245              case TARGET_SIG_SETMASK:
12246                  how = SIG_SETMASK;
12247                  break;
12248              default:
12249                  return -TARGET_EINVAL;
12250              }
12251              mask = arg2;
12252              target_to_host_old_sigset(&set, &mask);
12253              ret = do_sigprocmask(how, &set, &oldset);
12254              if (!ret) {
12255                  host_to_target_old_sigset(&mask, &oldset);
12256                  ret = mask;
12257              }
12258          }
12259          return ret;
12260  #endif
12261  
12262  #ifdef TARGET_NR_getgid32
12263      case TARGET_NR_getgid32:
12264          return get_errno(getgid());
12265  #endif
12266  #ifdef TARGET_NR_geteuid32
12267      case TARGET_NR_geteuid32:
12268          return get_errno(geteuid());
12269  #endif
12270  #ifdef TARGET_NR_getegid32
12271      case TARGET_NR_getegid32:
12272          return get_errno(getegid());
12273  #endif
12274  #ifdef TARGET_NR_setreuid32
12275      case TARGET_NR_setreuid32:
12276          return get_errno(sys_setreuid(arg1, arg2));
12277  #endif
12278  #ifdef TARGET_NR_setregid32
12279      case TARGET_NR_setregid32:
12280          return get_errno(sys_setregid(arg1, arg2));
12281  #endif
12282  #ifdef TARGET_NR_getgroups32
12283      case TARGET_NR_getgroups32:
12284          { /* the same code as for TARGET_NR_getgroups */
12285              int gidsetsize = arg1;
12286              uint32_t *target_grouplist;
12287              g_autofree gid_t *grouplist = NULL;
12288              int i;
12289  
12290              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12291                  return -TARGET_EINVAL;
12292              }
12293              if (gidsetsize > 0) {
12294                  grouplist = g_try_new(gid_t, gidsetsize);
12295                  if (!grouplist) {
12296                      return -TARGET_ENOMEM;
12297                  }
12298              }
12299              ret = get_errno(getgroups(gidsetsize, grouplist));
12300              if (!is_error(ret) && gidsetsize > 0) {
12301                  target_grouplist = lock_user(VERIFY_WRITE, arg2,
12302                                               gidsetsize * 4, 0);
12303                  if (!target_grouplist) {
12304                      return -TARGET_EFAULT;
12305                  }
12306                  for (i = 0; i < ret; i++) {
12307                      target_grouplist[i] = tswap32(grouplist[i]);
12308                  }
12309                  unlock_user(target_grouplist, arg2, gidsetsize * 4);
12310              }
12311              return ret;
12312          }
12313  #endif
12314  #ifdef TARGET_NR_setgroups32
12315      case TARGET_NR_setgroups32:
12316          { /* the same code as for TARGET_NR_setgroups */
12317              int gidsetsize = arg1;
12318              uint32_t *target_grouplist;
12319              g_autofree gid_t *grouplist = NULL;
12320              int i;
12321  
12322              if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12323                  return -TARGET_EINVAL;
12324              }
12325              if (gidsetsize > 0) {
12326                  grouplist = g_try_new(gid_t, gidsetsize);
12327                  if (!grouplist) {
12328                      return -TARGET_ENOMEM;
12329                  }
12330                  target_grouplist = lock_user(VERIFY_READ, arg2,
12331                                               gidsetsize * 4, 1);
12332                  if (!target_grouplist) {
12333                      return -TARGET_EFAULT;
12334                  }
12335                  for (i = 0; i < gidsetsize; i++) {
12336                      grouplist[i] = tswap32(target_grouplist[i]);
12337                  }
12338                  unlock_user(target_grouplist, arg2, 0);
12339              }
12340              return get_errno(sys_setgroups(gidsetsize, grouplist));
12341          }
12342  #endif
12343  #ifdef TARGET_NR_fchown32
12344      case TARGET_NR_fchown32:
12345          return get_errno(fchown(arg1, arg2, arg3));
12346  #endif
12347  #ifdef TARGET_NR_setresuid32
12348      case TARGET_NR_setresuid32:
12349          return get_errno(sys_setresuid(arg1, arg2, arg3));
12350  #endif
12351  #ifdef TARGET_NR_getresuid32
12352      case TARGET_NR_getresuid32:
12353          {
12354              uid_t ruid, euid, suid;
12355              ret = get_errno(getresuid(&ruid, &euid, &suid));
12356              if (!is_error(ret)) {
12357                  if (put_user_u32(ruid, arg1)
12358                      || put_user_u32(euid, arg2)
12359                      || put_user_u32(suid, arg3))
12360                      return -TARGET_EFAULT;
12361              }
12362          }
12363          return ret;
12364  #endif
12365  #ifdef TARGET_NR_setresgid32
12366      case TARGET_NR_setresgid32:
12367          return get_errno(sys_setresgid(arg1, arg2, arg3));
12368  #endif
12369  #ifdef TARGET_NR_getresgid32
12370      case TARGET_NR_getresgid32:
12371          {
12372              gid_t rgid, egid, sgid;
12373              ret = get_errno(getresgid(&rgid, &egid, &sgid));
12374              if (!is_error(ret)) {
12375                  if (put_user_u32(rgid, arg1)
12376                      || put_user_u32(egid, arg2)
12377                      || put_user_u32(sgid, arg3))
12378                      return -TARGET_EFAULT;
12379              }
12380          }
12381          return ret;
12382  #endif
12383  #ifdef TARGET_NR_chown32
12384      case TARGET_NR_chown32:
12385          if (!(p = lock_user_string(arg1)))
12386              return -TARGET_EFAULT;
12387          ret = get_errno(chown(p, arg2, arg3));
12388          unlock_user(p, arg1, 0);
12389          return ret;
12390  #endif
12391  #ifdef TARGET_NR_setuid32
12392      case TARGET_NR_setuid32:
12393          return get_errno(sys_setuid(arg1));
12394  #endif
12395  #ifdef TARGET_NR_setgid32
12396      case TARGET_NR_setgid32:
12397          return get_errno(sys_setgid(arg1));
12398  #endif
12399  #ifdef TARGET_NR_setfsuid32
12400      case TARGET_NR_setfsuid32:
12401          return get_errno(setfsuid(arg1));
12402  #endif
12403  #ifdef TARGET_NR_setfsgid32
12404      case TARGET_NR_setfsgid32:
12405          return get_errno(setfsgid(arg1));
12406  #endif
12407  #ifdef TARGET_NR_mincore
12408      case TARGET_NR_mincore:
12409          {
12410              void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12411              if (!a) {
12412                  return -TARGET_ENOMEM;
12413              }
12414              p = lock_user_string(arg3);
12415              if (!p) {
12416                  ret = -TARGET_EFAULT;
12417              } else {
12418                  ret = get_errno(mincore(a, arg2, p));
12419                  unlock_user(p, arg3, ret);
12420              }
12421              unlock_user(a, arg1, 0);
12422          }
12423          return ret;
12424  #endif
12425  #ifdef TARGET_NR_arm_fadvise64_64
12426      case TARGET_NR_arm_fadvise64_64:
12427          /* arm_fadvise64_64 looks like fadvise64_64 but
12428           * with different argument order: fd, advice, offset, len
12429           * rather than the usual fd, offset, len, advice.
12430           * Note that offset and len are both 64-bit so appear as
12431           * pairs of 32-bit registers.
12432           */
12433          ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12434                              target_offset64(arg5, arg6), arg2);
12435          return -host_to_target_errno(ret);
12436  #endif
12437  
12438  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12439  
12440  #ifdef TARGET_NR_fadvise64_64
12441      case TARGET_NR_fadvise64_64:
12442  #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12443          /* 6 args: fd, advice, offset (high, low), len (high, low) */
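               /* Rotate the register arguments (using ret as scratch) into the
                * generic fd, offset, len, advice order used below. */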
12444          ret = arg2;
12445          arg2 = arg3;
12446          arg3 = arg4;
12447          arg4 = arg5;
12448          arg5 = arg6;
12449          arg6 = ret;
12450  #else
12451          /* 6 args: fd, offset (high, low), len (high, low), advice */
12452          if (regpairs_aligned(cpu_env, num)) {
12453              /* offset is in (3,4), len in (5,6) and advice in 7 */
12454              arg2 = arg3;
12455              arg3 = arg4;
12456              arg4 = arg5;
12457              arg5 = arg6;
12458              arg6 = arg7;
12459          }
12460  #endif
12461          ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12462                              target_offset64(arg4, arg5), arg6);
12463          return -host_to_target_errno(ret);
12464  #endif
12465  
12466  #ifdef TARGET_NR_fadvise64
12467      case TARGET_NR_fadvise64:
12468          /* 5 args: fd, offset (high, low), len, advice */
12469          if (regpairs_aligned(cpu_env, num)) {
12470              /* offset is in (3,4), len in 5 and advice in 6 */
12471              arg2 = arg3;
12472              arg3 = arg4;
12473              arg4 = arg5;
12474              arg5 = arg6;
12475          }
12476          ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12477          return -host_to_target_errno(ret);
12478  #endif
12479  
12480  #else /* not a 32-bit ABI */
12481  #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12482  #ifdef TARGET_NR_fadvise64_64
12483      case TARGET_NR_fadvise64_64:
12484  #endif
12485  #ifdef TARGET_NR_fadvise64
12486      case TARGET_NR_fadvise64:
12487  #endif
12488  #ifdef TARGET_S390X
12489          switch (arg4) {
12490          case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12491          case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12492          case 6: arg4 = POSIX_FADV_DONTNEED; break;
12493          case 7: arg4 = POSIX_FADV_NOREUSE; break;
12494          default: break;
12495          }
12496  #endif
12497          return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12498  #endif
12499  #endif /* end of 64-bit ABI fadvise handling */
12500  
12501  #ifdef TARGET_NR_madvise
12502      case TARGET_NR_madvise:
12503          return target_madvise(arg1, arg2, arg3);
12504  #endif
12505  #ifdef TARGET_NR_fcntl64
12506      case TARGET_NR_fcntl64:
12507      {
12508          int cmd;
12509          struct flock fl;
12510          from_flock64_fn *copyfrom = copy_from_user_flock64;
12511          to_flock64_fn *copyto = copy_to_user_flock64;
12512  
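               /*
                * ARM OABI lays out struct flock64 differently from EABI, so
                * pick the matching copy-in/copy-out helpers.
                */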
12513  #ifdef TARGET_ARM
12514          if (!cpu_env->eabi) {
12515              copyfrom = copy_from_user_oabi_flock64;
12516              copyto = copy_to_user_oabi_flock64;
12517          }
12518  #endif
12519  
12520          cmd = target_to_host_fcntl_cmd(arg2);
12521          if (cmd == -TARGET_EINVAL) {
12522              return cmd;
12523          }
12524  
12525          switch(arg2) {
12526          case TARGET_F_GETLK64:
12527              ret = copyfrom(&fl, arg3);
12528              if (ret) {
12529                  break;
12530              }
12531              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12532              if (ret == 0) {
12533                  ret = copyto(arg3, &fl);
12534              }
12535              break;
12536  
12537          case TARGET_F_SETLK64:
12538          case TARGET_F_SETLKW64:
12539              ret = copyfrom(&fl, arg3);
12540              if (ret) {
12541                  break;
12542              }
12543              ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12544              break;
12545          default:
12546              ret = do_fcntl(arg1, arg2, arg3);
12547              break;
12548          }
12549          return ret;
12550      }
12551  #endif
12552  #ifdef TARGET_NR_cacheflush
12553      case TARGET_NR_cacheflush:
12554          /* self-modifying code is handled automatically, so nothing needed */
12555          return 0;
12556  #endif
12557  #ifdef TARGET_NR_getpagesize
12558      case TARGET_NR_getpagesize:
12559          return TARGET_PAGE_SIZE;
12560  #endif
12561      case TARGET_NR_gettid:
12562          return get_errno(sys_gettid());
12563  #ifdef TARGET_NR_readahead
12564      case TARGET_NR_readahead:
12565  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
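               /* The 64-bit offset arrives as a 32-bit register pair, possibly
                * shifted for alignment. */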
12566          if (regpairs_aligned(cpu_env, num)) {
12567              arg2 = arg3;
12568              arg3 = arg4;
12569              arg4 = arg5;
12570          }
12571          ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12572  #else
12573          ret = get_errno(readahead(arg1, arg2, arg3));
12574  #endif
12575          return ret;
12576  #endif
12577  #ifdef CONFIG_ATTR
12578  #ifdef TARGET_NR_setxattr
12579      case TARGET_NR_listxattr:
12580      case TARGET_NR_llistxattr:
12581      {
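               /* If the guest passes a NULL buffer (typically with size 0 to
                * query the required length), pass NULL straight through to the
                * host call. */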
12582          void *b = 0;
12583          if (arg2) {
12584              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12585              if (!b) {
12586                  return -TARGET_EFAULT;
12587              }
12588          }
12589          p = lock_user_string(arg1);
12590          if (p) {
12591              if (num == TARGET_NR_listxattr) {
12592                  ret = get_errno(listxattr(p, b, arg3));
12593              } else {
12594                  ret = get_errno(llistxattr(p, b, arg3));
12595              }
12596          } else {
12597              ret = -TARGET_EFAULT;
12598          }
12599          unlock_user(p, arg1, 0);
12600          unlock_user(b, arg2, arg3);
12601          return ret;
12602      }
12603      case TARGET_NR_flistxattr:
12604      {
12605          void *b = 0;
12606          if (arg2) {
12607              b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12608              if (!b) {
12609                  return -TARGET_EFAULT;
12610              }
12611          }
12612          ret = get_errno(flistxattr(arg1, b, arg3));
12613          unlock_user(b, arg2, arg3);
12614          return ret;
12615      }
12616      case TARGET_NR_setxattr:
12617      case TARGET_NR_lsetxattr:
12618          {
12619              void *n, *v = 0;
12620              if (arg3) {
12621                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12622                  if (!v) {
12623                      return -TARGET_EFAULT;
12624                  }
12625              }
12626              p = lock_user_string(arg1);
12627              n = lock_user_string(arg2);
12628              if (p && n) {
12629                  if (num == TARGET_NR_setxattr) {
12630                      ret = get_errno(setxattr(p, n, v, arg4, arg5));
12631                  } else {
12632                      ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12633                  }
12634              } else {
12635                  ret = -TARGET_EFAULT;
12636              }
12637              unlock_user(p, arg1, 0);
12638              unlock_user(n, arg2, 0);
12639              unlock_user(v, arg3, 0);
12640          }
12641          return ret;
12642      case TARGET_NR_fsetxattr:
12643          {
12644              void *n, *v = 0;
12645              if (arg3) {
12646                  v = lock_user(VERIFY_READ, arg3, arg4, 1);
12647                  if (!v) {
12648                      return -TARGET_EFAULT;
12649                  }
12650              }
12651              n = lock_user_string(arg2);
12652              if (n) {
12653                  ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12654              } else {
12655                  ret = -TARGET_EFAULT;
12656              }
12657              unlock_user(n, arg2, 0);
12658              unlock_user(v, arg3, 0);
12659          }
12660          return ret;
12661      case TARGET_NR_getxattr:
12662      case TARGET_NR_lgetxattr:
12663          {
12664              void *n, *v = 0;
12665              if (arg3) {
12666                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12667                  if (!v) {
12668                      return -TARGET_EFAULT;
12669                  }
12670              }
12671              p = lock_user_string(arg1);
12672              n = lock_user_string(arg2);
12673              if (p && n) {
12674                  if (num == TARGET_NR_getxattr) {
12675                      ret = get_errno(getxattr(p, n, v, arg4));
12676                  } else {
12677                      ret = get_errno(lgetxattr(p, n, v, arg4));
12678                  }
12679              } else {
12680                  ret = -TARGET_EFAULT;
12681              }
12682              unlock_user(p, arg1, 0);
12683              unlock_user(n, arg2, 0);
12684              unlock_user(v, arg3, arg4);
12685          }
12686          return ret;
12687      case TARGET_NR_fgetxattr:
12688          {
12689              void *n, *v = 0;
12690              if (arg3) {
12691                  v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12692                  if (!v) {
12693                      return -TARGET_EFAULT;
12694                  }
12695              }
12696              n = lock_user_string(arg2);
12697              if (n) {
12698                  ret = get_errno(fgetxattr(arg1, n, v, arg4));
12699              } else {
12700                  ret = -TARGET_EFAULT;
12701              }
12702              unlock_user(n, arg2, 0);
12703              unlock_user(v, arg3, arg4);
12704          }
12705          return ret;
12706      case TARGET_NR_removexattr:
12707      case TARGET_NR_lremovexattr:
12708          {
12709              void *n;
12710              p = lock_user_string(arg1);
12711              n = lock_user_string(arg2);
12712              if (p && n) {
12713                  if (num == TARGET_NR_removexattr) {
12714                      ret = get_errno(removexattr(p, n));
12715                  } else {
12716                      ret = get_errno(lremovexattr(p, n));
12717                  }
12718              } else {
12719                  ret = -TARGET_EFAULT;
12720              }
12721              unlock_user(p, arg1, 0);
12722              unlock_user(n, arg2, 0);
12723          }
12724          return ret;
12725      case TARGET_NR_fremovexattr:
12726          {
12727              void *n;
12728              n = lock_user_string(arg2);
12729              if (n) {
12730                  ret = get_errno(fremovexattr(arg1, n));
12731              } else {
12732                  ret = -TARGET_EFAULT;
12733              }
12734              unlock_user(n, arg2, 0);
12735          }
12736          return ret;
12737  #endif
12738  #endif /* CONFIG_ATTR */
12739  #ifdef TARGET_NR_set_thread_area
12740      case TARGET_NR_set_thread_area:
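               /*
                * Per-target TLS pointer handling: MIPS stores it in
                * CP0_UserLocal, 32-bit x86 installs a TLS descriptor, m68k
                * keeps it in the TaskState; everything else gets ENOSYS.
                */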
12741  #if defined(TARGET_MIPS)
12742        cpu_env->active_tc.CP0_UserLocal = arg1;
12743        return 0;
12744  #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12745        return do_set_thread_area(cpu_env, arg1);
12746  #elif defined(TARGET_M68K)
12747        {
12748            TaskState *ts = get_task_state(cpu);
12749            ts->tp_value = arg1;
12750            return 0;
12751        }
12752  #else
12753        return -TARGET_ENOSYS;
12754  #endif
12755  #endif
12756  #ifdef TARGET_NR_get_thread_area
12757      case TARGET_NR_get_thread_area:
12758  #if defined(TARGET_I386) && defined(TARGET_ABI32)
12759          return do_get_thread_area(cpu_env, arg1);
12760  #elif defined(TARGET_M68K)
12761          {
12762              TaskState *ts = get_task_state(cpu);
12763              return ts->tp_value;
12764          }
12765  #else
12766          return -TARGET_ENOSYS;
12767  #endif
12768  #endif
12769  #ifdef TARGET_NR_getdomainname
12770      case TARGET_NR_getdomainname:
12771          return -TARGET_ENOSYS;
12772  #endif
12773  
12774  #ifdef TARGET_NR_clock_settime
12775      case TARGET_NR_clock_settime:
12776      {
12777          struct timespec ts;
12778  
12779          ret = target_to_host_timespec(&ts, arg2);
12780          if (!is_error(ret)) {
12781              ret = get_errno(clock_settime(arg1, &ts));
12782          }
12783          return ret;
12784      }
12785  #endif
12786  #ifdef TARGET_NR_clock_settime64
12787      case TARGET_NR_clock_settime64:
12788      {
12789          struct timespec ts;
12790  
12791          ret = target_to_host_timespec64(&ts, arg2);
12792          if (!is_error(ret)) {
12793              ret = get_errno(clock_settime(arg1, &ts));
12794          }
12795          return ret;
12796      }
12797  #endif
12798  #ifdef TARGET_NR_clock_gettime
12799      case TARGET_NR_clock_gettime:
12800      {
12801          struct timespec ts;
12802          ret = get_errno(clock_gettime(arg1, &ts));
12803          if (!is_error(ret)) {
12804              ret = host_to_target_timespec(arg2, &ts);
12805          }
12806          return ret;
12807      }
12808  #endif
12809  #ifdef TARGET_NR_clock_gettime64
12810      case TARGET_NR_clock_gettime64:
12811      {
12812          struct timespec ts;
12813          ret = get_errno(clock_gettime(arg1, &ts));
12814          if (!is_error(ret)) {
12815              ret = host_to_target_timespec64(arg2, &ts);
12816          }
12817          return ret;
12818      }
12819  #endif
12820  #ifdef TARGET_NR_clock_getres
12821      case TARGET_NR_clock_getres:
12822      {
12823          struct timespec ts;
12824          ret = get_errno(clock_getres(arg1, &ts));
12825          if (!is_error(ret)) {
12826              host_to_target_timespec(arg2, &ts);
12827          }
12828          return ret;
12829      }
12830  #endif
12831  #ifdef TARGET_NR_clock_getres_time64
12832      case TARGET_NR_clock_getres_time64:
12833      {
12834          struct timespec ts;
12835          ret = get_errno(clock_getres(arg1, &ts));
12836          if (!is_error(ret)) {
12837              host_to_target_timespec64(arg2, &ts);
12838          }
12839          return ret;
12840      }
12841  #endif
12842  #ifdef TARGET_NR_clock_nanosleep
12843      case TARGET_NR_clock_nanosleep:
12844      {
12845          struct timespec ts;
12846          if (target_to_host_timespec(&ts, arg3)) {
12847              return -TARGET_EFAULT;
12848          }
12849          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12850                                               &ts, arg4 ? &ts : NULL));
12851          /*
12852           * If the call is interrupted by a signal handler, it fails with
12853           * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12854           * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12855           */
12856          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12857              host_to_target_timespec(arg4, &ts)) {
12858                return -TARGET_EFAULT;
12859          }
12860  
12861          return ret;
12862      }
12863  #endif
12864  #ifdef TARGET_NR_clock_nanosleep_time64
12865      case TARGET_NR_clock_nanosleep_time64:
12866      {
12867          struct timespec ts;
12868  
12869          if (target_to_host_timespec64(&ts, arg3)) {
12870              return -TARGET_EFAULT;
12871          }
12872  
12873          ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12874                                               &ts, arg4 ? &ts : NULL));
12875  
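               /* As for clock_nanosleep above: report the remaining time only
                * for an interrupted relative sleep. */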
12876          if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12877              host_to_target_timespec64(arg4, &ts)) {
12878              return -TARGET_EFAULT;
12879          }
12880          return ret;
12881      }
12882  #endif
12883  
12884  #if defined(TARGET_NR_set_tid_address)
12885      case TARGET_NR_set_tid_address:
12886      {
12887          TaskState *ts = get_task_state(cpu);
12888          ts->child_tidptr = arg1;
12889          /* Do not call the host set_tid_address() syscall; just return the thread id. */
12890          return get_errno(sys_gettid());
12891      }
12892  #endif
12893  
12894      case TARGET_NR_tkill:
12895          return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12896  
12897      case TARGET_NR_tgkill:
12898          return get_errno(safe_tgkill((int)arg1, (int)arg2,
12899                           target_to_host_signal(arg3)));
12900  
12901  #ifdef TARGET_NR_set_robust_list
12902      case TARGET_NR_set_robust_list:
12903      case TARGET_NR_get_robust_list:
12904          /* The ABI for supporting robust futexes has userspace pass
12905           * the kernel a pointer to a linked list which is updated by
12906           * userspace after the syscall; the list is walked by the kernel
12907           * when the thread exits. Since the linked list in QEMU guest
12908           * memory isn't a valid linked list for the host and we have
12909           * no way to reliably intercept the thread-death event, we can't
12910           * support these. Silently return ENOSYS so that guest userspace
12911           * falls back to a non-robust futex implementation (which should
12912           * be OK except in the corner case of the guest crashing while
12913           * holding a mutex that is shared with another process via
12914           * shared memory).
12915           */
12916          return -TARGET_ENOSYS;
12917  #endif
12918  
12919  #if defined(TARGET_NR_utimensat)
12920      case TARGET_NR_utimensat:
12921          {
12922              struct timespec *tsp, ts[2];
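                   /* A NULL times pointer means "set both timestamps to now". */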
12923              if (!arg3) {
12924                  tsp = NULL;
12925              } else {
12926                  if (target_to_host_timespec(ts, arg3)) {
12927                      return -TARGET_EFAULT;
12928                  }
12929                  if (target_to_host_timespec(ts + 1, arg3 +
12930                                              sizeof(struct target_timespec))) {
12931                      return -TARGET_EFAULT;
12932                  }
12933                  tsp = ts;
12934              }
12935              if (!arg2) {
12936                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12937              } else {
12938                  if (!(p = lock_user_string(arg2))) {
12939                      return -TARGET_EFAULT;
12940                  }
12941                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12942                  unlock_user(p, arg2, 0);
12943              }
12944          }
12945          return ret;
12946  #endif
12947  #ifdef TARGET_NR_utimensat_time64
12948      case TARGET_NR_utimensat_time64:
12949          {
12950              struct timespec *tsp, ts[2];
12951              if (!arg3) {
12952                  tsp = NULL;
12953              } else {
12954                  if (target_to_host_timespec64(ts, arg3)) {
12955                      return -TARGET_EFAULT;
12956                  }
12957                  if (target_to_host_timespec64(ts + 1, arg3 +
12958                                       sizeof(struct target__kernel_timespec))) {
12959                      return -TARGET_EFAULT;
12960                  }
12961                  tsp = ts;
12962              }
12963              if (!arg2) {
12964                  ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12965              } else {
12966                  p = lock_user_string(arg2);
12967                  if (!p) {
12968                      return -TARGET_EFAULT;
12969                  }
12970                  ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12971                  unlock_user(p, arg2, 0);
12972              }
12973          }
12974          return ret;
12975  #endif
12976  #ifdef TARGET_NR_futex
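           /*
            * The boolean argument tells do_futex() whether any timeout is a
            * 64-bit target timespec (futex_time64) or the traditional layout.
            */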
12977      case TARGET_NR_futex:
12978          return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12979  #endif
12980  #ifdef TARGET_NR_futex_time64
12981      case TARGET_NR_futex_time64:
12982          return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12983  #endif
12984  #ifdef CONFIG_INOTIFY
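           /*
            * The fd translator registered below converts the inotify_event
            * records the guest read()s from this descriptor into the target's
            * byte order.
            */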
12985  #if defined(TARGET_NR_inotify_init)
12986      case TARGET_NR_inotify_init:
12987          ret = get_errno(inotify_init());
12988          if (ret >= 0) {
12989              fd_trans_register(ret, &target_inotify_trans);
12990          }
12991          return ret;
12992  #endif
12993  #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12994      case TARGET_NR_inotify_init1:
12995          ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12996                                            fcntl_flags_tbl)));
12997          if (ret >= 0) {
12998              fd_trans_register(ret, &target_inotify_trans);
12999          }
13000          return ret;
13001  #endif
13002  #if defined(TARGET_NR_inotify_add_watch)
13003      case TARGET_NR_inotify_add_watch:
13004          p = lock_user_string(arg2);
13005          ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13006          unlock_user(p, arg2, 0);
13007          return ret;
13008  #endif
13009  #if defined(TARGET_NR_inotify_rm_watch)
13010      case TARGET_NR_inotify_rm_watch:
13011          return get_errno(inotify_rm_watch(arg1, arg2));
13012  #endif
13013  #endif
13014  
13015  #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13016      case TARGET_NR_mq_open:
13017          {
13018              struct mq_attr posix_mq_attr;
13019              struct mq_attr *pposix_mq_attr;
13020              int host_flags;
13021  
13022              host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13023              pposix_mq_attr = NULL;
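                   /* Convert the guest's mq_attr, if supplied, to the host layout. */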
13024              if (arg4) {
13025                  if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13026                      return -TARGET_EFAULT;
13027                  }
13028                  pposix_mq_attr = &posix_mq_attr;
13029              }
13030              p = lock_user_string(arg1 - 1);
13031              if (!p) {
13032                  return -TARGET_EFAULT;
13033              }
13034              ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13035              unlock_user(p, arg1, 0);
13036          }
13037          return ret;
13038  
13039      case TARGET_NR_mq_unlink:
13040          p = lock_user_string(arg1 - 1);
13041          if (!p) {
13042              return -TARGET_EFAULT;
13043          }
13044          ret = get_errno(mq_unlink(p));
13045          unlock_user(p, arg1, 0);
13046          return ret;
13047  
13048  #ifdef TARGET_NR_mq_timedsend
13049      case TARGET_NR_mq_timedsend:
13050          {
13051              struct timespec ts;
13052  
13053              p = lock_user(VERIFY_READ, arg2, arg3, 1);
13054              if (arg5 != 0) {
13055                  if (target_to_host_timespec(&ts, arg5)) {
13056                      return -TARGET_EFAULT;
13057                  }
13058                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13059                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13060                      return -TARGET_EFAULT;
13061                  }
13062              } else {
13063                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13064              }
13065              unlock_user(p, arg2, arg3);
13066          }
13067          return ret;
13068  #endif
13069  #ifdef TARGET_NR_mq_timedsend_time64
13070      case TARGET_NR_mq_timedsend_time64:
13071          {
13072              struct timespec ts;
13073  
13074              p = lock_user(VERIFY_READ, arg2, arg3, 1);
13075              if (arg5 != 0) {
13076                  if (target_to_host_timespec64(&ts, arg5)) {
13077                      return -TARGET_EFAULT;
13078                  }
13079                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13080                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13081                      return -TARGET_EFAULT;
13082                  }
13083              } else {
13084                  ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13085              }
13086              unlock_user(p, arg2, arg3);
13087          }
13088          return ret;
13089  #endif
13090  
13091  #ifdef TARGET_NR_mq_timedreceive
13092      case TARGET_NR_mq_timedreceive:
13093          {
13094              struct timespec ts;
13095              unsigned int prio;
13096  
13097              p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
13098              if (arg5 != 0) {
13099                  if (target_to_host_timespec(&ts, arg5)) {
13100                      return -TARGET_EFAULT;
13101                  }
13102                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13103                                                       &prio, &ts));
13104                  if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13105                      return -TARGET_EFAULT;
13106                  }
13107              } else {
13108                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13109                                                       &prio, NULL));
13110              }
13111              unlock_user(p, arg2, arg3);
13112              if (arg4 != 0)
13113                  put_user_u32(prio, arg4);
13114          }
13115          return ret;
13116  #endif
13117  #ifdef TARGET_NR_mq_timedreceive_time64
13118      case TARGET_NR_mq_timedreceive_time64:
13119          {
13120              struct timespec ts;
13121              unsigned int prio;
13122  
13123              p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
13124              if (arg5 != 0) {
13125                  if (target_to_host_timespec64(&ts, arg5)) {
13126                      return -TARGET_EFAULT;
13127                  }
13128                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13129                                                       &prio, &ts));
13130                  if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13131                      return -TARGET_EFAULT;
13132                  }
13133              } else {
13134                  ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13135                                                       &prio, NULL));
13136              }
13137              unlock_user(p, arg2, arg3);
13138              if (arg4 != 0) {
13139                  put_user_u32(prio, arg4);
13140              }
13141          }
13142          return ret;
13143  #endif
13144  
13145      /* Not implemented for now... */
13146  /*     case TARGET_NR_mq_notify: */
13147  /*         break; */
13148  
13149      case TARGET_NR_mq_getsetattr:
13150          {
13151              struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13152              ret = 0;
13153              if (arg2 != 0) {
13154                  copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13155                  ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13156                                             &posix_mq_attr_out));
13157              } else if (arg3 != 0) {
13158                  ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13159              }
13160              if (ret == 0 && arg3 != 0) {
13161                  copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13162              }
13163          }
13164          return ret;
13165  #endif
13166  
13167  #ifdef CONFIG_SPLICE
13168  #ifdef TARGET_NR_tee
13169      case TARGET_NR_tee:
13170          {
13171              ret = get_errno(tee(arg1, arg2, arg3, arg4));
13172          }
13173          return ret;
13174  #endif
13175  #ifdef TARGET_NR_splice
13176      case TARGET_NR_splice:
13177          {
13178              loff_t loff_in, loff_out;
13179              loff_t *ploff_in = NULL, *ploff_out = NULL;
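                   /*
                    * splice() updates any offsets it is given, so copy them in
                    * from guest memory here and write them back afterwards.
                    */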
13180              if (arg2) {
13181                  if (get_user_u64(loff_in, arg2)) {
13182                      return -TARGET_EFAULT;
13183                  }
13184                  ploff_in = &loff_in;
13185              }
13186              if (arg4) {
13187                  if (get_user_u64(loff_out, arg4)) {
13188                      return -TARGET_EFAULT;
13189                  }
13190                  ploff_out = &loff_out;
13191              }
13192              ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13193              if (arg2) {
13194                  if (put_user_u64(loff_in, arg2)) {
13195                      return -TARGET_EFAULT;
13196                  }
13197              }
13198              if (arg4) {
13199                  if (put_user_u64(loff_out, arg4)) {
13200                      return -TARGET_EFAULT;
13201                  }
13202              }
13203          }
13204          return ret;
13205  #endif
13206  #ifdef TARGET_NR_vmsplice
13207      case TARGET_NR_vmsplice:
13208          {
13209              struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13210              if (vec != NULL) {
13211                  ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13212                  unlock_iovec(vec, arg2, arg3, 0);
13213              } else {
13214                  ret = -host_to_target_errno(errno);
13215              }
13216          }
13217          return ret;
13218  #endif
13219  #endif /* CONFIG_SPLICE */
13220  #ifdef CONFIG_EVENTFD
13221  #if defined(TARGET_NR_eventfd)
13222      case TARGET_NR_eventfd:
13223          ret = get_errno(eventfd(arg1, 0));
13224          if (ret >= 0) {
13225              fd_trans_register(ret, &target_eventfd_trans);
13226          }
13227          return ret;
13228  #endif
13229  #if defined(TARGET_NR_eventfd2)
13230      case TARGET_NR_eventfd2:
13231      {
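               /*
                * EFD_NONBLOCK and EFD_CLOEXEC share the O_NONBLOCK/O_CLOEXEC
                * values on Linux, so translating just the O_* bits is enough.
                */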
13232          int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13233          if (arg2 & TARGET_O_NONBLOCK) {
13234              host_flags |= O_NONBLOCK;
13235          }
13236          if (arg2 & TARGET_O_CLOEXEC) {
13237              host_flags |= O_CLOEXEC;
13238          }
13239          ret = get_errno(eventfd(arg1, host_flags));
13240          if (ret >= 0) {
13241              fd_trans_register(ret, &target_eventfd_trans);
13242          }
13243          return ret;
13244      }
13245  #endif
13246  #endif /* CONFIG_EVENTFD  */
13247  #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13248      case TARGET_NR_fallocate:
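               /*
                * On 32-bit ABIs (other than MIPS N32) the 64-bit offset and
                * length arrive split across register pairs and are reassembled
                * with target_offset64().
                */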
13249  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13250          ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13251                                    target_offset64(arg5, arg6)));
13252  #else
13253          ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13254  #endif
13255          return ret;
13256  #endif
13257  #if defined(CONFIG_SYNC_FILE_RANGE)
13258  #if defined(TARGET_NR_sync_file_range)
13259      case TARGET_NR_sync_file_range:
13260  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13261  #if defined(TARGET_MIPS)
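               /*
                * MIPS O32 passes a padding argument before the first 64-bit
                * offset, so the offsets arrive in arg3..arg6 and the flags
                * in arg7.
                */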
13262          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13263                                          target_offset64(arg5, arg6), arg7));
13264  #else
13265          ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13266                                          target_offset64(arg4, arg5), arg6));
13267  #endif /* !TARGET_MIPS */
13268  #else
13269          ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13270  #endif
13271          return ret;
13272  #endif
13273  #if defined(TARGET_NR_sync_file_range2) || \
13274      defined(TARGET_NR_arm_sync_file_range)
13275  #if defined(TARGET_NR_sync_file_range2)
13276      case TARGET_NR_sync_file_range2:
13277  #endif
13278  #if defined(TARGET_NR_arm_sync_file_range)
13279      case TARGET_NR_arm_sync_file_range:
13280  #endif
13281          /* This is like sync_file_range but the arguments are reordered */
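               /*
                * Moving the flags up front keeps the two 64-bit offsets in
                * aligned register pairs on 32-bit ABIs.
                */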
13282  #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13283          ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13284                                          target_offset64(arg5, arg6), arg2));
13285  #else
13286          ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13287  #endif
13288          return ret;
13289  #endif
13290  #endif
13291  #if defined(TARGET_NR_signalfd4)
13292      case TARGET_NR_signalfd4:
13293          return do_signalfd4(arg1, arg2, arg4);
13294  #endif
13295  #if defined(TARGET_NR_signalfd)
13296      case TARGET_NR_signalfd:
13297          return do_signalfd4(arg1, arg2, 0);
13298  #endif
13299  #if defined(CONFIG_EPOLL)
13300  #if defined(TARGET_NR_epoll_create)
13301      case TARGET_NR_epoll_create:
13302          return get_errno(epoll_create(arg1));
13303  #endif
13304  #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13305      case TARGET_NR_epoll_create1:
13306          return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13307  #endif
13308  #if defined(TARGET_NR_epoll_ctl)
13309      case TARGET_NR_epoll_ctl:
13310      {
13311          struct epoll_event ep;
13312          struct epoll_event *epp = 0;
13313          if (arg4) {
13314              if (arg2 != EPOLL_CTL_DEL) {
13315                  struct target_epoll_event *target_ep;
13316                  if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13317                      return -TARGET_EFAULT;
13318                  }
13319                  ep.events = tswap32(target_ep->events);
13320                  /*
13321                   * The epoll_data_t union is just opaque data to the kernel,
13322                   * so we transfer all 64 bits across and need not worry what
13323                   * actual data type it is.
13324                   */
13325                  ep.data.u64 = tswap64(target_ep->data.u64);
13326                  unlock_user_struct(target_ep, arg4, 0);
13327              }
13328              /*
13329               * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13330               * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13331               *
13332               */
13333              epp = &ep;
13334          }
13335          return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13336      }
13337  #endif
13338  
13339  #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13340  #if defined(TARGET_NR_epoll_wait)
13341      case TARGET_NR_epoll_wait:
13342  #endif
13343  #if defined(TARGET_NR_epoll_pwait)
13344      case TARGET_NR_epoll_pwait:
13345  #endif
13346      {
13347          struct target_epoll_event *target_ep;
13348          struct epoll_event *ep;
13349          int epfd = arg1;
13350          int maxevents = arg3;
13351          int timeout = arg4;
13352  
13353          if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13354              return -TARGET_EINVAL;
13355          }
13356  
13357          target_ep = lock_user(VERIFY_WRITE, arg2,
13358                                maxevents * sizeof(struct target_epoll_event), 1);
13359          if (!target_ep) {
13360              return -TARGET_EFAULT;
13361          }
13362  
13363          ep = g_try_new(struct epoll_event, maxevents);
13364          if (!ep) {
13365              unlock_user(target_ep, arg2, 0);
13366              return -TARGET_ENOMEM;
13367          }
13368  
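               /*
                * The host and target epoll_event layouts (packing and byte
                * order) may differ, so collect results in a host array and
                * convert them field by field below.
                */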
13369          switch (num) {
13370  #if defined(TARGET_NR_epoll_pwait)
13371          case TARGET_NR_epoll_pwait:
13372          {
13373              sigset_t *set = NULL;
13374  
13375              if (arg5) {
13376                  ret = process_sigsuspend_mask(&set, arg5, arg6);
13377                  if (ret != 0) {
13378                      break;
13379                  }
13380              }
13381  
13382              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13383                                               set, SIGSET_T_SIZE));
13384  
13385              if (set) {
13386                  finish_sigsuspend_mask(ret);
13387              }
13388              break;
13389          }
13390  #endif
13391  #if defined(TARGET_NR_epoll_wait)
13392          case TARGET_NR_epoll_wait:
13393              ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13394                                               NULL, 0));
13395              break;
13396  #endif
13397          default:
13398              ret = -TARGET_ENOSYS;
13399          }
13400          if (!is_error(ret)) {
13401              int i;
13402              for (i = 0; i < ret; i++) {
13403                  target_ep[i].events = tswap32(ep[i].events);
13404                  target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13405              }
13406              unlock_user(target_ep, arg2,
13407                          ret * sizeof(struct target_epoll_event));
13408          } else {
13409              unlock_user(target_ep, arg2, 0);
13410          }
13411          g_free(ep);
13412          return ret;
13413      }
13414  #endif
13415  #endif
13416  #ifdef TARGET_NR_prlimit64
13417      case TARGET_NR_prlimit64:
13418      {
13419          /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13420          struct target_rlimit64 *target_rnew, *target_rold;
13421          struct host_rlimit64 rnew, rold, *rnewp = 0;
13422          int resource = target_to_host_resource(arg2);
13423  
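               /*
                * New limits for RLIMIT_AS/DATA/STACK are not forwarded to the
                * host: they would apply to the whole QEMU process, so for
                * those resources only the old values are read back.
                */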
13424          if (arg3 && (resource != RLIMIT_AS &&
13425                       resource != RLIMIT_DATA &&
13426                       resource != RLIMIT_STACK)) {
13427              if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13428                  return -TARGET_EFAULT;
13429              }
13430              __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13431              __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13432              unlock_user_struct(target_rnew, arg3, 0);
13433              rnewp = &rnew;
13434          }
13435  
13436          ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13437          if (!is_error(ret) && arg4) {
13438              if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13439                  return -TARGET_EFAULT;
13440              }
13441              __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13442              __put_user(rold.rlim_max, &target_rold->rlim_max);
13443              unlock_user_struct(target_rold, arg4, 1);
13444          }
13445          return ret;
13446      }
13447  #endif
13448  #ifdef TARGET_NR_gethostname
13449      case TARGET_NR_gethostname:
13450      {
13451          char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13452          if (name) {
13453              ret = get_errno(gethostname(name, arg2));
13454              unlock_user(name, arg1, arg2);
13455          } else {
13456              ret = -TARGET_EFAULT;
13457          }
13458          return ret;
13459      }
13460  #endif
13461  #ifdef TARGET_NR_atomic_cmpxchg_32
13462      case TARGET_NR_atomic_cmpxchg_32:
13463      {
13464          /* should use start_exclusive from main.c */
13465          abi_ulong mem_value;
13466          if (get_user_u32(mem_value, arg6)) {
13467              target_siginfo_t info;
13468              info.si_signo = SIGSEGV;
13469              info.si_errno = 0;
13470              info.si_code = TARGET_SEGV_MAPERR;
13471              info._sifields._sigfault._addr = arg6;
13472              queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13473              ret = 0xdeadbeef;
13474  
13475          }
13476          if (mem_value == arg2)
13477              put_user_u32(arg1, arg6);
13478          return mem_value;
13479      }
13480  #endif
13481  #ifdef TARGET_NR_atomic_barrier
13482      case TARGET_NR_atomic_barrier:
13483          /* Like the kernel implementation and the QEMU Arm barrier,
13484           * treat this as a no-op. */
13485          return 0;
13486  #endif
13487  
13488  #ifdef TARGET_NR_timer_create
13489      case TARGET_NR_timer_create:
13490      {
13491          /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13492  
13493          struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13494  
13495          int clkid = arg1;
13496          int timer_index = next_free_host_timer();
13497  
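               /*
                * Host timers are kept in the g_posix_timers[] slot table; the
                * id returned to the guest encodes TIMER_MAGIC plus the slot
                * index and is decoded again by get_timer_id().
                */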
13498          if (timer_index < 0) {
13499              ret = -TARGET_EAGAIN;
13500          } else {
13501              timer_t *phtimer = g_posix_timers + timer_index;
13502  
13503              if (arg2) {
13504                  phost_sevp = &host_sevp;
13505                  ret = target_to_host_sigevent(phost_sevp, arg2);
13506                  if (ret != 0) {
13507                      free_host_timer_slot(timer_index);
13508                      return ret;
13509                  }
13510              }
13511  
13512              ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13513              if (ret) {
13514                  free_host_timer_slot(timer_index);
13515              } else {
13516                  if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13517                      timer_delete(*phtimer);
13518                      free_host_timer_slot(timer_index);
13519                      return -TARGET_EFAULT;
13520                  }
13521              }
13522          }
13523          return ret;
13524      }
13525  #endif
13526  
13527  #ifdef TARGET_NR_timer_settime
13528      case TARGET_NR_timer_settime:
13529      {
13530          /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13531           * struct itimerspec * old_value */
13532          target_timer_t timerid = get_timer_id(arg1);
13533  
13534          if (timerid < 0) {
13535              ret = timerid;
13536          } else if (arg3 == 0) {
13537              ret = -TARGET_EINVAL;
13538          } else {
13539              timer_t htimer = g_posix_timers[timerid];
13540              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13541  
13542              if (target_to_host_itimerspec(&hspec_new, arg3)) {
13543                  return -TARGET_EFAULT;
13544              }
13545              ret = get_errno(
13546                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13547              if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13548                  return -TARGET_EFAULT;
13549              }
13550          }
13551          return ret;
13552      }
13553  #endif
13554  
13555  #ifdef TARGET_NR_timer_settime64
13556      case TARGET_NR_timer_settime64:
13557      {
13558          target_timer_t timerid = get_timer_id(arg1);
13559  
13560          if (timerid < 0) {
13561              ret = timerid;
13562          } else if (arg3 == 0) {
13563              ret = -TARGET_EINVAL;
13564          } else {
13565              timer_t htimer = g_posix_timers[timerid];
13566              struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13567  
13568              if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13569                  return -TARGET_EFAULT;
13570              }
13571              ret = get_errno(
13572                            timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13573              if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13574                  return -TARGET_EFAULT;
13575              }
13576          }
13577          return ret;
13578      }
13579  #endif
13580  
13581  #ifdef TARGET_NR_timer_gettime
13582      case TARGET_NR_timer_gettime:
13583      {
13584          /* args: timer_t timerid, struct itimerspec *curr_value */
13585          target_timer_t timerid = get_timer_id(arg1);
13586  
13587          if (timerid < 0) {
13588              ret = timerid;
13589          } else if (!arg2) {
13590              ret = -TARGET_EFAULT;
13591          } else {
13592              timer_t htimer = g_posix_timers[timerid];
13593              struct itimerspec hspec;
13594              ret = get_errno(timer_gettime(htimer, &hspec));
13595  
13596              if (host_to_target_itimerspec(arg2, &hspec)) {
13597                  ret = -TARGET_EFAULT;
13598              }
13599          }
13600          return ret;
13601      }
13602  #endif
13603  
13604  #ifdef TARGET_NR_timer_gettime64
13605      case TARGET_NR_timer_gettime64:
13606      {
13607          /* args: timer_t timerid, struct itimerspec64 *curr_value */
13608          target_timer_t timerid = get_timer_id(arg1);
13609  
13610          if (timerid < 0) {
13611              ret = timerid;
13612          } else if (!arg2) {
13613              ret = -TARGET_EFAULT;
13614          } else {
13615              timer_t htimer = g_posix_timers[timerid];
13616              struct itimerspec hspec;
13617              ret = get_errno(timer_gettime(htimer, &hspec));
13618  
13619              if (host_to_target_itimerspec64(arg2, &hspec)) {
13620                  ret = -TARGET_EFAULT;
13621              }
13622          }
13623          return ret;
13624      }
13625  #endif
13626  
13627  #ifdef TARGET_NR_timer_getoverrun
13628      case TARGET_NR_timer_getoverrun:
13629      {
13630          /* args: timer_t timerid */
13631          target_timer_t timerid = get_timer_id(arg1);
13632  
13633          if (timerid < 0) {
13634              ret = timerid;
13635          } else {
13636              timer_t htimer = g_posix_timers[timerid];
13637              ret = get_errno(timer_getoverrun(htimer));
13638          }
13639          return ret;
13640      }
13641  #endif
13642  
13643  #ifdef TARGET_NR_timer_delete
13644      case TARGET_NR_timer_delete:
13645      {
13646          /* args: timer_t timerid */
13647          target_timer_t timerid = get_timer_id(arg1);
13648  
13649          if (timerid < 0) {
13650              ret = timerid;
13651          } else {
13652              timer_t htimer = g_posix_timers[timerid];
13653              ret = get_errno(timer_delete(htimer));
13654              free_host_timer_slot(timerid);
13655          }
13656          return ret;
13657      }
13658  #endif
13659  
13660  #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13661      case TARGET_NR_timerfd_create:
13662          ret = get_errno(timerfd_create(arg1,
13663                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13664          if (ret >= 0) {
13665              fd_trans_register(ret, &target_timerfd_trans);
13666          }
13667          return ret;
13668  #endif
13669  
13670  #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13671      case TARGET_NR_timerfd_gettime:
13672          {
13673              struct itimerspec its_curr;
13674  
13675              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13676  
13677              if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13678                  return -TARGET_EFAULT;
13679              }
13680          }
13681          return ret;
13682  #endif
13683  
13684  #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13685      case TARGET_NR_timerfd_gettime64:
13686          {
13687              struct itimerspec its_curr;
13688  
13689              ret = get_errno(timerfd_gettime(arg1, &its_curr));
13690  
13691              if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13692                  return -TARGET_EFAULT;
13693              }
13694          }
13695          return ret;
13696  #endif
13697  
13698  #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13699      case TARGET_NR_timerfd_settime:
13700          {
13701              struct itimerspec its_new, its_old, *p_new;
13702  
13703              if (arg3) {
13704                  if (target_to_host_itimerspec(&its_new, arg3)) {
13705                      return -TARGET_EFAULT;
13706                  }
13707                  p_new = &its_new;
13708              } else {
13709                  p_new = NULL;
13710              }
13711  
13712              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13713  
13714              if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13715                  return -TARGET_EFAULT;
13716              }
13717          }
13718          return ret;
13719  #endif
13720  
13721  #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13722      case TARGET_NR_timerfd_settime64:
13723          {
13724              struct itimerspec its_new, its_old, *p_new;
13725  
13726              if (arg3) {
13727                  if (target_to_host_itimerspec64(&its_new, arg3)) {
13728                      return -TARGET_EFAULT;
13729                  }
13730                  p_new = &its_new;
13731              } else {
13732                  p_new = NULL;
13733              }
13734  
13735              ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13736  
13737              if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13738                  return -TARGET_EFAULT;
13739              }
13740          }
13741          return ret;
13742  #endif
13743  
13744  #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13745      case TARGET_NR_ioprio_get:
13746          return get_errno(ioprio_get(arg1, arg2));
13747  #endif
13748  
13749  #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13750      case TARGET_NR_ioprio_set:
13751          return get_errno(ioprio_set(arg1, arg2, arg3));
13752  #endif
13753  
13754  #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13755      case TARGET_NR_setns:
13756          return get_errno(setns(arg1, arg2));
13757  #endif
13758  #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13759      case TARGET_NR_unshare:
13760          return get_errno(unshare(arg1));
13761  #endif
13762  #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13763      case TARGET_NR_kcmp:
13764          return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13765  #endif
13766  #ifdef TARGET_NR_swapcontext
13767      case TARGET_NR_swapcontext:
13768          /* PowerPC specific.  */
13769          return do_swapcontext(cpu_env, arg1, arg2, arg3);
13770  #endif
13771  #ifdef TARGET_NR_memfd_create
13772      case TARGET_NR_memfd_create:
13773          p = lock_user_string(arg1);
13774          if (!p) {
13775              return -TARGET_EFAULT;
13776          }
13777          ret = get_errno(memfd_create(p, arg2));
13778          fd_trans_unregister(ret);
13779          unlock_user(p, arg1, 0);
13780          return ret;
13781  #endif
13782  #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13783      case TARGET_NR_membarrier:
13784          return get_errno(membarrier(arg1, arg2));
13785  #endif
13786  
13787  #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13788      case TARGET_NR_copy_file_range:
13789          {
13790              loff_t inoff, outoff;
13791              loff_t *pinoff = NULL, *poutoff = NULL;
13792  
13793              if (arg2) {
13794                  if (get_user_u64(inoff, arg2)) {
13795                      return -TARGET_EFAULT;
13796                  }
13797                  pinoff = &inoff;
13798              }
13799              if (arg4) {
13800                  if (get_user_u64(outoff, arg4)) {
13801                      return -TARGET_EFAULT;
13802                  }
13803                  poutoff = &outoff;
13804              }
13805              /* Do not sign-extend the count parameter. */
13806              ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13807                                                   (abi_ulong)arg5, arg6));
13808              if (!is_error(ret) && ret > 0) {
13809                  if (arg2) {
13810                      if (put_user_u64(inoff, arg2)) {
13811                          return -TARGET_EFAULT;
13812                      }
13813                  }
13814                  if (arg4) {
13815                      if (put_user_u64(outoff, arg4)) {
13816                          return -TARGET_EFAULT;
13817                      }
13818                  }
13819              }
13820          }
13821          return ret;
13822  #endif
13823  
13824  #if defined(TARGET_NR_pivot_root)
13825      case TARGET_NR_pivot_root:
13826          {
13827              void *p2;
13828              p = lock_user_string(arg1); /* new_root */
13829              p2 = lock_user_string(arg2); /* put_old */
13830              if (!p || !p2) {
13831                  ret = -TARGET_EFAULT;
13832              } else {
13833                  ret = get_errno(pivot_root(p, p2));
13834              }
13835              unlock_user(p2, arg2, 0);
13836              unlock_user(p, arg1, 0);
13837          }
13838          return ret;
13839  #endif
13840  
13841  #if defined(TARGET_NR_riscv_hwprobe)
13842      case TARGET_NR_riscv_hwprobe:
13843          return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13844  #endif
13845  
13846      default:
13847          qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13848          return -TARGET_ENOSYS;
13849      }
13850      return ret;
13851  }
13852  
13853  abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13854                      abi_long arg2, abi_long arg3, abi_long arg4,
13855                      abi_long arg5, abi_long arg6, abi_long arg7,
13856                      abi_long arg8)
13857  {
13858      CPUState *cpu = env_cpu(cpu_env);
13859      abi_long ret;
13860  
13861  #ifdef DEBUG_ERESTARTSYS
13862      /* Debug-only code for exercising the syscall-restart code paths
13863       * in the per-architecture cpu main loops: restart every syscall
13864       * the guest makes once before letting it through.
13865       */
13866      {
13867          static bool flag;
13868          flag = !flag;
13869          if (flag) {
13870              return -QEMU_ERESTARTSYS;
13871          }
13872      }
13873  #endif
13874  
13875      record_syscall_start(cpu, num, arg1,
13876                           arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13877  
13878      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13879          print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13880      }
13881  
13882      ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13883                        arg5, arg6, arg7, arg8);
13884  
13885      if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13886          print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13887                            arg3, arg4, arg5, arg6);
13888      }
13889  
13890      record_syscall_return(cpu, num, ret);
13891      return ret;
13892  }
13893