xref: /openbmc/qemu/linux-user/syscall.c (revision d83b61f59c809d3447127f880dd65944180ce478)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/safe-syscall.h"
139 #include "qemu/guest-random.h"
140 #include "qemu/selfmap.h"
141 #include "user/syscall-trace.h"
142 #include "special-errno.h"
143 #include "qapi/error.h"
144 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
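/*
 * Editorial sketch (not part of the original file): one way the masks
 * above could be applied to classify a clone request. The helper name
 * classify_clone_flags() is hypothetical; the real checks live in
 * do_fork() further down in this file.
 */
static inline int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* pthread_create()-style request: reject anything outside the
         * supported thread mask */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -EINVAL : 1;
    }
    if (flags & CLONE_THREAD_FLAGS) {
        /* a partial set of the thread flags is not supported */
        return -EINVAL;
    }
    /* fork()-style request: reject anything outside the fork mask */
    return (flags & CLONE_INVALID_FORK_FLAGS) ? -EINVAL : 0;
}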
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
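/*
 * Editorial note: the size mismatch described above is concrete. A
 * 32-bit host linux_dirent header is 4 (d_ino) + 4 (d_off) + 2
 * (d_reclen) bytes plus the name, while a 64-bit guest expects
 * 8 + 8 + 2 plus the name, so converting a full host buffer of records
 * can overflow the guest's buffer.
 */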
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc < 2.41 */
362 #ifndef SCHED_ATTR_SIZE_VER0
363 struct sched_attr {
364     uint32_t size;
365     uint32_t sched_policy;
366     uint64_t sched_flags;
367     int32_t sched_nice;
368     uint32_t sched_priority;
369     uint64_t sched_runtime;
370     uint64_t sched_deadline;
371     uint64_t sched_period;
372     uint32_t sched_util_min;
373     uint32_t sched_util_max;
374 };
375 #endif
376 #define __NR_sys_sched_getattr __NR_sched_getattr
377 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
378           unsigned int, size, unsigned int, flags);
379 #define __NR_sys_sched_setattr __NR_sched_setattr
380 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
381           unsigned int, flags);
382 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
383 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
384 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
385 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
386           const struct sched_param *, param);
387 #define __NR_sys_sched_getparam __NR_sched_getparam
388 _syscall2(int, sys_sched_getparam, pid_t, pid,
389           struct sched_param *, param);
390 #define __NR_sys_sched_setparam __NR_sched_setparam
391 _syscall2(int, sys_sched_setparam, pid_t, pid,
392           const struct sched_param *, param);
393 #define __NR_sys_getcpu __NR_getcpu
394 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
395 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
396           void *, arg);
397 _syscall2(int, capget, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 _syscall2(int, capset, struct __user_cap_header_struct *, header,
400           struct __user_cap_data_struct *, data);
401 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
402 _syscall2(int, ioprio_get, int, which, int, who)
403 #endif
404 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
405 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
406 #endif
407 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
408 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
409 #endif
410 
411 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
412 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
413           unsigned long, idx1, unsigned long, idx2)
414 #endif
415 
416 /*
417  * It is assumed that struct statx is architecture independent.
418  */
419 #if defined(TARGET_NR_statx) && defined(__NR_statx)
420 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
421           unsigned int, mask, struct target_statx *, statxbuf)
422 #endif
423 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
424 _syscall2(int, membarrier, int, cmd, int, flags)
425 #endif
426 
427 static const bitmask_transtbl fcntl_flags_tbl[] = {
428   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
429   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
430   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
431   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
432   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
433   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
434   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
435   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
436   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
437   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
438   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
439   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
440   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
441 #if defined(O_DIRECT)
442   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
443 #endif
444 #if defined(O_NOATIME)
445   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
446 #endif
447 #if defined(O_CLOEXEC)
448   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
449 #endif
450 #if defined(O_PATH)
451   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
452 #endif
453 #if defined(O_TMPFILE)
454   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
455 #endif
456   /* Don't terminate the list prematurely on 64-bit host+guest.  */
457 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
458   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
459 #endif
460 };
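/*
 * Usage note (editorial): guest open/fcntl flag words are translated
 * entry by entry through this table with QEMU's bitmask helpers,
 * roughly:
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 */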
461 
462 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
463 
464 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
465 #if defined(__NR_utimensat)
466 #define __NR_sys_utimensat __NR_utimensat
467 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
468           const struct timespec *,tsp,int,flags)
469 #else
470 static int sys_utimensat(int dirfd, const char *pathname,
471                          const struct timespec times[2], int flags)
472 {
473     errno = ENOSYS;
474     return -1;
475 }
476 #endif
477 #endif /* TARGET_NR_utimensat */
478 
479 #ifdef TARGET_NR_renameat2
480 #if defined(__NR_renameat2)
481 #define __NR_sys_renameat2 __NR_renameat2
482 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
483           const char *, new, unsigned int, flags)
484 #else
485 static int sys_renameat2(int oldfd, const char *old,
486                          int newfd, const char *new, int flags)
487 {
488     if (flags == 0) {
489         return renameat(oldfd, old, newfd, new);
490     }
491     errno = ENOSYS;
492     return -1;
493 }
494 #endif
495 #endif /* TARGET_NR_renameat2 */
496 
497 #ifdef CONFIG_INOTIFY
498 #include <sys/inotify.h>
499 #else
500 /* Userspace can usually survive runtime without inotify */
501 #undef TARGET_NR_inotify_init
502 #undef TARGET_NR_inotify_init1
503 #undef TARGET_NR_inotify_add_watch
504 #undef TARGET_NR_inotify_rm_watch
505 #endif /* CONFIG_INOTIFY  */
506 
507 #if defined(TARGET_NR_prlimit64)
508 #ifndef __NR_prlimit64
509 # define __NR_prlimit64 -1
510 #endif
511 #define __NR_sys_prlimit64 __NR_prlimit64
512 /* The glibc rlimit structure may not be that used by the underlying syscall */
513 struct host_rlimit64 {
514     uint64_t rlim_cur;
515     uint64_t rlim_max;
516 };
517 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
518           const struct host_rlimit64 *, new_limit,
519           struct host_rlimit64 *, old_limit)
520 #endif
521 
522 
523 #if defined(TARGET_NR_timer_create)
524 /* Maximum of 32 active POSIX timers allowed at any one time. */
525 #define GUEST_TIMER_MAX 32
526 static timer_t g_posix_timers[GUEST_TIMER_MAX];
527 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
528 
529 static inline int next_free_host_timer(void)
530 {
531     int k;
532     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
533         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
534             return k;
535         }
536     }
537     return -1;
538 }
539 
540 static inline void free_host_timer_slot(int id)
541 {
542     qatomic_store_release(g_posix_timer_allocated + id, 0);
543 }
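/*
 * Editorial sketch: how a timer_create handler might use the slot
 * allocator above. example_alloc_guest_timer() is hypothetical; the
 * real handling is in the TARGET_NR_timer_create case later in this
 * file, which also converts the host errno for the guest.
 */
static inline int example_alloc_guest_timer(clockid_t clock)
{
    int slot = next_free_host_timer();
    if (slot < 0) {
        return -TARGET_EAGAIN;      /* all GUEST_TIMER_MAX slots in use */
    }
    if (timer_create(clock, NULL, &g_posix_timers[slot]) < 0) {
        free_host_timer_slot(slot); /* give the slot back on failure */
        return -1;
    }
    return slot;                    /* the slot index is the guest timer id */
}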
544 #endif
545 
546 static inline int host_to_target_errno(int host_errno)
547 {
548     switch (host_errno) {
549 #define E(X)  case X: return TARGET_##X;
550 #include "errnos.c.inc"
551 #undef E
552     default:
553         return host_errno;
554     }
555 }
556 
557 static inline int target_to_host_errno(int target_errno)
558 {
559     switch (target_errno) {
560 #define E(X)  case TARGET_##X: return X;
561 #include "errnos.c.inc"
562 #undef E
563     default:
564         return target_errno;
565     }
566 }
567 
568 abi_long get_errno(abi_long ret)
569 {
570     if (ret == -1)
571         return -host_to_target_errno(errno);
572     else
573         return ret;
574 }
575 
576 const char *target_strerror(int err)
577 {
578     if (err == QEMU_ERESTARTSYS) {
579         return "To be restarted";
580     }
581     if (err == QEMU_ESIGRETURN) {
582         return "Successful exit from sigreturn";
583     }
584 
585     return strerror(target_to_host_errno(err));
586 }
587 
588 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
589 {
590     int i;
591     uint8_t b;
592     if (usize <= ksize) {
593         return 1;
594     }
595     for (i = ksize; i < usize; i++) {
596         if (get_user_u8(b, addr + i)) {
597             return -TARGET_EFAULT;
598         }
599         if (b != 0) {
600             return 0;
601         }
602     }
603     return 1;
604 }
605 
606 /*
607  * Copies a target struct to a host struct, in a way that guarantees
608  * backwards-compatibility for struct syscall arguments.
609  *
610  * Similar to kernels uaccess.h:copy_struct_from_user()
611  */
612 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
613 {
614     size_t size = MIN(ksize, usize);
615     size_t rest = MAX(ksize, usize) - size;
616 
617     /* Deal with trailing bytes. */
618     if (usize < ksize) {
619         memset(dst + size, 0, rest);
620     } else if (usize > ksize) {
621         int ret = check_zeroed_user(src, ksize, usize);
622         if (ret <= 0) {
623             return ret ?: -TARGET_E2BIG;
624         }
625     }
626     /* Copy the interoperable parts of the struct. */
627     if (copy_from_user(dst, src, size)) {
628         return -TARGET_EFAULT;
629     }
630     return 0;
631 }
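/*
 * Usage sketch (editorial): reading a versioned, growable struct from
 * the guest the way the kernel reads one from userspace. The handler
 * name and arguments here are hypothetical.
 */
static inline abi_long example_get_versioned_struct(abi_ptr uattr,
                                                    abi_ulong usize)
{
    struct sched_attr attr = { 0 };
    abi_long ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);

    if (ret) {
        /* -TARGET_EFAULT on a bad pointer, or -TARGET_E2BIG if the
         * guest's extra (unknown to us) trailing fields are non-zero */
        return ret;
    }
    /* attr now holds the guest data, truncated or zero-extended */
    return 0;
}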
632 
633 #define safe_syscall0(type, name) \
634 static type safe_##name(void) \
635 { \
636     return safe_syscall(__NR_##name); \
637 }
638 
639 #define safe_syscall1(type, name, type1, arg1) \
640 static type safe_##name(type1 arg1) \
641 { \
642     return safe_syscall(__NR_##name, arg1); \
643 }
644 
645 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
646 static type safe_##name(type1 arg1, type2 arg2) \
647 { \
648     return safe_syscall(__NR_##name, arg1, arg2); \
649 }
650 
651 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
652 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
653 { \
654     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
655 }
656 
657 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
658     type4, arg4) \
659 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
660 { \
661     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
662 }
663 
664 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
665     type4, arg4, type5, arg5) \
666 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
667     type5 arg5) \
668 { \
669     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
670 }
671 
672 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
673     type4, arg4, type5, arg5, type6, arg6) \
674 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
675     type5 arg5, type6 arg6) \
676 { \
677     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
678 }
679 
680 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
681 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
682 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
683               int, flags, mode_t, mode)
684 
685 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
686               const struct open_how_ver0 *, how, size_t, size)
687 
688 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
689 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
690               struct rusage *, rusage)
691 #endif
692 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
693               int, options, struct rusage *, rusage)
694 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
695 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
696               char **, argv, char **, envp, int, flags)
697 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
698     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
699 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
700               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
701 #endif
702 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
703 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
704               struct timespec *, tsp, const sigset_t *, sigmask,
705               size_t, sigsetsize)
706 #endif
707 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
708               int, maxevents, int, timeout, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #if defined(__NR_futex)
711 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
712               const struct timespec *,timeout,int *,uaddr2,int,val3)
713 #endif
714 #if defined(__NR_futex_time64)
715 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
719 safe_syscall2(int, kill, pid_t, pid, int, sig)
720 safe_syscall2(int, tkill, int, tid, int, sig)
721 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
722 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
723 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
724 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
725               unsigned long, pos_l, unsigned long, pos_h)
726 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
727               unsigned long, pos_l, unsigned long, pos_h)
728 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
729               socklen_t, addrlen)
730 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
731               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
732 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
733               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
734 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
735 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
736 safe_syscall2(int, flock, int, fd, int, operation)
737 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
738 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
739               const struct timespec *, uts, size_t, sigsetsize)
740 #endif
741 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
742               int, flags)
743 #if defined(TARGET_NR_nanosleep)
744 safe_syscall2(int, nanosleep, const struct timespec *, req,
745               struct timespec *, rem)
746 #endif
747 #if defined(TARGET_NR_clock_nanosleep) || \
748     defined(TARGET_NR_clock_nanosleep_time64)
749 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
750               const struct timespec *, req, struct timespec *, rem)
751 #endif
752 #ifdef __NR_ipc
753 #ifdef __s390x__
754 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
755               void *, ptr)
756 #else
757 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
758               void *, ptr, long, fifth)
759 #endif
760 #endif
761 #ifdef __NR_msgsnd
762 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
763               int, flags)
764 #endif
765 #ifdef __NR_msgrcv
766 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
767               long, msgtype, int, flags)
768 #endif
769 #ifdef __NR_semtimedop
770 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
771               unsigned, nsops, const struct timespec *, timeout)
772 #endif
773 #if defined(TARGET_NR_mq_timedsend) || \
774     defined(TARGET_NR_mq_timedsend_time64)
775 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
776               size_t, len, unsigned, prio, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedreceive) || \
779     defined(TARGET_NR_mq_timedreceive_time64)
780 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
781               size_t, len, unsigned *, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
784 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
785               int, outfd, loff_t *, poutoff, size_t, length,
786               unsigned int, flags)
787 #endif
788 
789 /* We do ioctl like this rather than via safe_syscall3 to preserve the
790  * "third argument might be integer or pointer or not present" behaviour of
791  * the libc function.
792  */
793 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
794 /* Similarly for fcntl. Since we always build with LFS enabled,
795  * we should be using the 64-bit structures automatically.
796  */
797 #ifdef __NR_fcntl64
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
799 #else
800 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
801 #endif
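/*
 * Editorial example: because safe_ioctl() is variadic, callers can pass
 * an integer, a pointer, or nothing at all as the third argument, just
 * as with ioctl(2). The helper below is hypothetical.
 */
static inline int example_read_rtc(int fd, struct rtc_time *tm)
{
    /* RTC_RD_TIME takes a pointer third argument */
    return safe_ioctl(fd, RTC_RD_TIME, tm);
}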
802 
803 static inline int host_to_target_sock_type(int host_type)
804 {
805     int target_type;
806 
807     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
808     case SOCK_DGRAM:
809         target_type = TARGET_SOCK_DGRAM;
810         break;
811     case SOCK_STREAM:
812         target_type = TARGET_SOCK_STREAM;
813         break;
814     default:
815         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
816         break;
817     }
818 
819 #if defined(SOCK_CLOEXEC)
820     if (host_type & SOCK_CLOEXEC) {
821         target_type |= TARGET_SOCK_CLOEXEC;
822     }
823 #endif
824 
825 #if defined(SOCK_NONBLOCK)
826     if (host_type & SOCK_NONBLOCK) {
827         target_type |= TARGET_SOCK_NONBLOCK;
828     }
829 #endif
830 
831     return target_type;
832 }
833 
834 static abi_ulong target_brk, initial_target_brk;
835 
836 void target_set_brk(abi_ulong new_brk)
837 {
838     target_brk = TARGET_PAGE_ALIGN(new_brk);
839     initial_target_brk = target_brk;
840 }
841 
842 /* do_brk() must return target values and target errnos. */
843 abi_long do_brk(abi_ulong brk_val)
844 {
845     abi_long mapped_addr;
846     abi_ulong new_brk;
847     abi_ulong old_brk;
848 
849     /* brk pointers are always untagged */
850 
 851     /* do not allow shrinking below the initial brk value */
852     if (brk_val < initial_target_brk) {
853         return target_brk;
854     }
855 
856     new_brk = TARGET_PAGE_ALIGN(brk_val);
857     old_brk = TARGET_PAGE_ALIGN(target_brk);
858 
859     /* new and old target_brk might be on the same page */
860     if (new_brk == old_brk) {
861         target_brk = brk_val;
862         return target_brk;
863     }
864 
865     /* Release heap if necessary */
866     if (new_brk < old_brk) {
867         target_munmap(new_brk, old_brk - new_brk);
868 
869         target_brk = brk_val;
870         return target_brk;
871     }
872 
873     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
874                               PROT_READ | PROT_WRITE,
875                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
876                               -1, 0);
877 
878     if (mapped_addr == old_brk) {
879         target_brk = brk_val;
880         return target_brk;
881     }
882 
883 #if defined(TARGET_ALPHA)
884     /* We (partially) emulate OSF/1 on Alpha, which requires we
885        return a proper errno, not an unchanged brk value.  */
886     return -TARGET_ENOMEM;
887 #endif
888     /* For everything else, return the previous break. */
889     return target_brk;
890 }
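/*
 * Editorial example: with 4 KiB target pages, a brk_val of 0x401234
 * page-aligns up to 0x402000. If the old break already aligned to
 * 0x402000, nothing is mapped or unmapped and only target_brk changes;
 * growing past it maps new anonymous pages, shrinking below it unmaps.
 */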
891 
892 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
893     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
894 static inline abi_long copy_from_user_fdset(fd_set *fds,
895                                             abi_ulong target_fds_addr,
896                                             int n)
897 {
898     int i, nw, j, k;
899     abi_ulong b, *target_fds;
900 
901     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
902     if (!(target_fds = lock_user(VERIFY_READ,
903                                  target_fds_addr,
904                                  sizeof(abi_ulong) * nw,
905                                  1)))
906         return -TARGET_EFAULT;
907 
908     FD_ZERO(fds);
909     k = 0;
910     for (i = 0; i < nw; i++) {
911         /* grab the abi_ulong */
912         __get_user(b, &target_fds[i]);
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             /* check the bit inside the abi_ulong */
915             if ((b >> j) & 1)
916                 FD_SET(k, fds);
917             k++;
918         }
919     }
920 
921     unlock_user(target_fds, target_fds_addr, 0);
922 
923     return 0;
924 }
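/*
 * Editorial note on the layout handled above: a guest fd_set is an
 * array of abi_ulong words, so fd k lives at bit k % TARGET_ABI_BITS of
 * word k / TARGET_ABI_BITS. E.g. with TARGET_ABI_BITS == 32, fd 37 is
 * bit 5 of target_fds[1].
 */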
925 
926 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
927                                                  abi_ulong target_fds_addr,
928                                                  int n)
929 {
930     if (target_fds_addr) {
931         if (copy_from_user_fdset(fds, target_fds_addr, n))
932             return -TARGET_EFAULT;
933         *fds_ptr = fds;
934     } else {
935         *fds_ptr = NULL;
936     }
937     return 0;
938 }
939 
940 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
941                                           const fd_set *fds,
942                                           int n)
943 {
944     int i, nw, j, k;
945     abi_long v;
946     abi_ulong *target_fds;
947 
948     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
949     if (!(target_fds = lock_user(VERIFY_WRITE,
950                                  target_fds_addr,
951                                  sizeof(abi_ulong) * nw,
952                                  0)))
953         return -TARGET_EFAULT;
954 
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         v = 0;
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
960             k++;
961         }
962         __put_user(v, &target_fds[i]);
963     }
964 
965     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
966 
967     return 0;
968 }
969 #endif
970 
971 #if defined(__alpha__)
972 #define HOST_HZ 1024
973 #else
974 #define HOST_HZ 100
975 #endif
976 
977 static inline abi_long host_to_target_clock_t(long ticks)
978 {
979 #if HOST_HZ == TARGET_HZ
980     return ticks;
981 #else
982     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
983 #endif
984 }
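/*
 * Editorial example: on an Alpha host (HOST_HZ == 1024) reporting to a
 * typical TARGET_HZ == 100 guest, 2048 host ticks convert to
 * 2048 * 100 / 1024 = 200 guest ticks.
 */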
985 
986 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
987                                              const struct rusage *rusage)
988 {
989     struct target_rusage *target_rusage;
990 
991     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
992         return -TARGET_EFAULT;
993     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
994     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
995     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
996     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
997     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
998     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
999     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1000     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1001     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1002     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1003     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1004     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1005     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1006     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1007     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1008     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1009     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1010     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1011     unlock_user_struct(target_rusage, target_addr, 1);
1012 
1013     return 0;
1014 }
1015 
1016 #ifdef TARGET_NR_setrlimit
1017 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1018 {
1019     abi_ulong target_rlim_swap;
1020     rlim_t result;
1021 
1022     target_rlim_swap = tswapal(target_rlim);
1023     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1024         return RLIM_INFINITY;
1025 
1026     result = target_rlim_swap;
1027     if (target_rlim_swap != (rlim_t)result)
1028         return RLIM_INFINITY;
1029 
1030     return result;
1031 }
1032 #endif
1033 
1034 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1035 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1036 {
1037     abi_ulong target_rlim_swap;
1038     abi_ulong result;
1039 
1040     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1041         target_rlim_swap = TARGET_RLIM_INFINITY;
1042     else
1043         target_rlim_swap = rlim;
1044     result = tswapal(target_rlim_swap);
1045 
1046     return result;
1047 }
1048 #endif
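/*
 * Editorial note: the clamping above means a host limit that does not
 * fit in the guest's abi_long (for example an 8 GiB RLIMIT_AS reported
 * to a 32-bit guest) is presented as TARGET_RLIM_INFINITY rather than
 * as a silently truncated value.
 */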
1049 
1050 static inline int target_to_host_resource(int code)
1051 {
1052     switch (code) {
1053     case TARGET_RLIMIT_AS:
1054         return RLIMIT_AS;
1055     case TARGET_RLIMIT_CORE:
1056         return RLIMIT_CORE;
1057     case TARGET_RLIMIT_CPU:
1058         return RLIMIT_CPU;
1059     case TARGET_RLIMIT_DATA:
1060         return RLIMIT_DATA;
1061     case TARGET_RLIMIT_FSIZE:
1062         return RLIMIT_FSIZE;
1063     case TARGET_RLIMIT_LOCKS:
1064         return RLIMIT_LOCKS;
1065     case TARGET_RLIMIT_MEMLOCK:
1066         return RLIMIT_MEMLOCK;
1067     case TARGET_RLIMIT_MSGQUEUE:
1068         return RLIMIT_MSGQUEUE;
1069     case TARGET_RLIMIT_NICE:
1070         return RLIMIT_NICE;
1071     case TARGET_RLIMIT_NOFILE:
1072         return RLIMIT_NOFILE;
1073     case TARGET_RLIMIT_NPROC:
1074         return RLIMIT_NPROC;
1075     case TARGET_RLIMIT_RSS:
1076         return RLIMIT_RSS;
1077     case TARGET_RLIMIT_RTPRIO:
1078         return RLIMIT_RTPRIO;
1079 #ifdef RLIMIT_RTTIME
1080     case TARGET_RLIMIT_RTTIME:
1081         return RLIMIT_RTTIME;
1082 #endif
1083     case TARGET_RLIMIT_SIGPENDING:
1084         return RLIMIT_SIGPENDING;
1085     case TARGET_RLIMIT_STACK:
1086         return RLIMIT_STACK;
1087     default:
1088         return code;
1089     }
1090 }
1091 
1092 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1093                                               abi_ulong target_tv_addr)
1094 {
1095     struct target_timeval *target_tv;
1096 
1097     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1098         return -TARGET_EFAULT;
1099     }
1100 
1101     __get_user(tv->tv_sec, &target_tv->tv_sec);
1102     __get_user(tv->tv_usec, &target_tv->tv_usec);
1103 
1104     unlock_user_struct(target_tv, target_tv_addr, 0);
1105 
1106     return 0;
1107 }
1108 
1109 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1110                                             const struct timeval *tv)
1111 {
1112     struct target_timeval *target_tv;
1113 
1114     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1115         return -TARGET_EFAULT;
1116     }
1117 
1118     __put_user(tv->tv_sec, &target_tv->tv_sec);
1119     __put_user(tv->tv_usec, &target_tv->tv_usec);
1120 
1121     unlock_user_struct(target_tv, target_tv_addr, 1);
1122 
1123     return 0;
1124 }
1125 
1126 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1127 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1128                                                 abi_ulong target_tv_addr)
1129 {
1130     struct target__kernel_sock_timeval *target_tv;
1131 
1132     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1133         return -TARGET_EFAULT;
1134     }
1135 
1136     __get_user(tv->tv_sec, &target_tv->tv_sec);
1137     __get_user(tv->tv_usec, &target_tv->tv_usec);
1138 
1139     unlock_user_struct(target_tv, target_tv_addr, 0);
1140 
1141     return 0;
1142 }
1143 #endif
1144 
1145 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1146                                               const struct timeval *tv)
1147 {
1148     struct target__kernel_sock_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
1162 #if defined(TARGET_NR_futex) || \
1163     defined(TARGET_NR_rt_sigtimedwait) || \
 1164     defined(TARGET_NR_pselect6) || \
1165     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1166     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1167     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1168     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1169     defined(TARGET_NR_timer_settime) || \
1170     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1171 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1172                                                abi_ulong target_addr)
1173 {
1174     struct target_timespec *target_ts;
1175 
1176     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1177         return -TARGET_EFAULT;
1178     }
1179     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1180     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1181     unlock_user_struct(target_ts, target_addr, 0);
1182     return 0;
1183 }
1184 #endif
1185 
1186 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1187     defined(TARGET_NR_timer_settime64) || \
1188     defined(TARGET_NR_mq_timedsend_time64) || \
1189     defined(TARGET_NR_mq_timedreceive_time64) || \
1190     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1191     defined(TARGET_NR_clock_nanosleep_time64) || \
1192     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1193     defined(TARGET_NR_utimensat) || \
1194     defined(TARGET_NR_utimensat_time64) || \
1195     defined(TARGET_NR_semtimedop_time64) || \
1196     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1197 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1198                                                  abi_ulong target_addr)
1199 {
1200     struct target__kernel_timespec *target_ts;
1201 
1202     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1203         return -TARGET_EFAULT;
1204     }
1205     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1206     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207     /* in 32bit mode, this drops the padding */
1208     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1209     unlock_user_struct(target_ts, target_addr, 0);
1210     return 0;
1211 }
1212 #endif
1213 
1214 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1215                                                struct timespec *host_ts)
1216 {
1217     struct target_timespec *target_ts;
1218 
1219     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1220         return -TARGET_EFAULT;
1221     }
1222     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1223     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1224     unlock_user_struct(target_ts, target_addr, 1);
1225     return 0;
1226 }
1227 
1228 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1229                                                  struct timespec *host_ts)
1230 {
1231     struct target__kernel_timespec *target_ts;
1232 
1233     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1234         return -TARGET_EFAULT;
1235     }
1236     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1237     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1238     unlock_user_struct(target_ts, target_addr, 1);
1239     return 0;
1240 }
1241 
1242 #if defined(TARGET_NR_gettimeofday)
1243 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1244                                              struct timezone *tz)
1245 {
1246     struct target_timezone *target_tz;
1247 
1248     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1249         return -TARGET_EFAULT;
1250     }
1251 
1252     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1253     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1254 
1255     unlock_user_struct(target_tz, target_tz_addr, 1);
1256 
1257     return 0;
1258 }
1259 #endif
1260 
1261 #if defined(TARGET_NR_settimeofday)
1262 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1263                                                abi_ulong target_tz_addr)
1264 {
1265     struct target_timezone *target_tz;
1266 
1267     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1268         return -TARGET_EFAULT;
1269     }
1270 
1271     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1272     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1273 
1274     unlock_user_struct(target_tz, target_tz_addr, 0);
1275 
1276     return 0;
1277 }
1278 #endif
1279 
1280 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1281 #include <mqueue.h>
1282 
1283 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1284                                               abi_ulong target_mq_attr_addr)
1285 {
1286     struct target_mq_attr *target_mq_attr;
1287 
1288     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1289                           target_mq_attr_addr, 1))
1290         return -TARGET_EFAULT;
1291 
1292     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1293     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1294     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1295     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1296 
1297     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1298 
1299     return 0;
1300 }
1301 
1302 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1303                                             const struct mq_attr *attr)
1304 {
1305     struct target_mq_attr *target_mq_attr;
1306 
1307     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1308                           target_mq_attr_addr, 0))
1309         return -TARGET_EFAULT;
1310 
1311     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1312     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1313     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1314     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1315 
1316     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1317 
1318     return 0;
1319 }
1320 #endif
1321 
1322 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1323 /* do_select() must return target values and target errnos. */
1324 static abi_long do_select(int n,
1325                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1326                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1327 {
1328     fd_set rfds, wfds, efds;
1329     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1330     struct timeval tv;
1331     struct timespec ts, *ts_ptr;
1332     abi_long ret;
1333 
1334     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1335     if (ret) {
1336         return ret;
1337     }
1338     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346 
1347     if (target_tv_addr) {
1348         if (copy_from_user_timeval(&tv, target_tv_addr))
1349             return -TARGET_EFAULT;
1350         ts.tv_sec = tv.tv_sec;
1351         ts.tv_nsec = tv.tv_usec * 1000;
1352         ts_ptr = &ts;
1353     } else {
1354         ts_ptr = NULL;
1355     }
1356 
1357     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1358                                   ts_ptr, NULL));
1359 
1360     if (!is_error(ret)) {
1361         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1362             return -TARGET_EFAULT;
1363         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1364             return -TARGET_EFAULT;
1365         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1366             return -TARGET_EFAULT;
1367 
1368         if (target_tv_addr) {
1369             tv.tv_sec = ts.tv_sec;
1370             tv.tv_usec = ts.tv_nsec / 1000;
1371             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1372                 return -TARGET_EFAULT;
1373             }
1374         }
1375     }
1376 
1377     return ret;
1378 }
1379 
1380 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1381 static abi_long do_old_select(abi_ulong arg1)
1382 {
1383     struct target_sel_arg_struct *sel;
1384     abi_ulong inp, outp, exp, tvp;
1385     long nsel;
1386 
1387     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1388         return -TARGET_EFAULT;
1389     }
1390 
1391     nsel = tswapal(sel->n);
1392     inp = tswapal(sel->inp);
1393     outp = tswapal(sel->outp);
1394     exp = tswapal(sel->exp);
1395     tvp = tswapal(sel->tvp);
1396 
1397     unlock_user_struct(sel, arg1, 0);
1398 
1399     return do_select(nsel, inp, outp, exp, tvp);
1400 }
1401 #endif
1402 #endif
1403 
1404 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1405 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1406                             abi_long arg4, abi_long arg5, abi_long arg6,
1407                             bool time64)
1408 {
1409     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1410     fd_set rfds, wfds, efds;
1411     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1412     struct timespec ts, *ts_ptr;
1413     abi_long ret;
1414 
1415     /*
1416      * The 6th arg is actually two args smashed together,
1417      * so we cannot use the C library.
1418      */
1419     struct {
1420         sigset_t *set;
1421         size_t size;
1422     } sig, *sig_ptr;
1423 
1424     abi_ulong arg_sigset, arg_sigsize, *arg7;
1425 
1426     n = arg1;
1427     rfd_addr = arg2;
1428     wfd_addr = arg3;
1429     efd_addr = arg4;
1430     ts_addr = arg5;
1431 
1432     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1433     if (ret) {
1434         return ret;
1435     }
1436     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444 
1445     /*
1446      * This takes a timespec, and not a timeval, so we cannot
1447      * use the do_select() helper ...
1448      */
1449     if (ts_addr) {
1450         if (time64) {
1451             if (target_to_host_timespec64(&ts, ts_addr)) {
1452                 return -TARGET_EFAULT;
1453             }
1454         } else {
1455             if (target_to_host_timespec(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         }
 1459         ts_ptr = &ts;
1460     } else {
1461         ts_ptr = NULL;
1462     }
1463 
1464     /* Extract the two packed args for the sigset */
1465     sig_ptr = NULL;
1466     if (arg6) {
1467         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1468         if (!arg7) {
1469             return -TARGET_EFAULT;
1470         }
1471         arg_sigset = tswapal(arg7[0]);
1472         arg_sigsize = tswapal(arg7[1]);
1473         unlock_user(arg7, arg6, 0);
1474 
1475         if (arg_sigset) {
1476             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1477             if (ret != 0) {
1478                 return ret;
1479             }
1480             sig_ptr = &sig;
1481             sig.size = SIGSET_T_SIZE;
1482         }
1483     }
1484 
1485     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1486                                   ts_ptr, sig_ptr));
1487 
1488     if (sig_ptr) {
1489         finish_sigsuspend_mask(ret);
1490     }
1491 
1492     if (!is_error(ret)) {
1493         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1494             return -TARGET_EFAULT;
1495         }
1496         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1497             return -TARGET_EFAULT;
1498         }
1499         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1500             return -TARGET_EFAULT;
1501         }
1502         if (time64) {
1503             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1504                 return -TARGET_EFAULT;
1505             }
1506         } else {
1507             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         }
1511     }
1512     return ret;
1513 }
1514 #endif
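
/*
 * Illustrative guest-side sketch of the ABI handled above: pselect6's sixth
 * argument is a pointer to a {sigset pointer, sigset size} pair, which is
 * what the lock_user()/tswapal() sequence in do_pselect6() unpacks.  The
 * wrapper name and variables below are invented for illustration; the
 * packed layout itself follows the kernel interface.
 */
#include <signal.h>
#include <stddef.h>
#include <sys/select.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long pselect6_packed(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds,
                            struct timespec *ts, const sigset_t *mask)
{
    struct {
        const sigset_t *ss;     /* pointer half of the packed argument */
        size_t ss_len;          /* size half: sizeof the kernel sigset */
    } packed = { mask, _NSIG / 8 };

    return syscall(SYS_pselect6, nfds, rfds, wfds, efds, ts, &packed);
}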
1515 
1516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1517     defined(TARGET_NR_ppoll_time64)
1518 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1519                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1520 {
1521     struct target_pollfd *target_pfd;
1522     unsigned int nfds = arg2;
1523     struct pollfd *pfd;
1524     unsigned int i;
1525     abi_long ret;
1526 
1527     pfd = NULL;
1528     target_pfd = NULL;
1529     if (nfds) {
1530         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1531             return -TARGET_EINVAL;
1532         }
1533         target_pfd = lock_user(VERIFY_WRITE, arg1,
1534                                sizeof(struct target_pollfd) * nfds, 1);
1535         if (!target_pfd) {
1536             return -TARGET_EFAULT;
1537         }
1538 
1539         pfd = alloca(sizeof(struct pollfd) * nfds);
1540         for (i = 0; i < nfds; i++) {
1541             pfd[i].fd = tswap32(target_pfd[i].fd);
1542             pfd[i].events = tswap16(target_pfd[i].events);
1543         }
1544     }
1545     if (ppoll) {
1546         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1547         sigset_t *set = NULL;
1548 
1549         if (arg3) {
1550             if (time64) {
1551                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1552                     unlock_user(target_pfd, arg1, 0);
1553                     return -TARGET_EFAULT;
1554                 }
1555             } else {
1556                 if (target_to_host_timespec(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         } else {
1562             timeout_ts = NULL;
1563         }
1564 
1565         if (arg4) {
1566             ret = process_sigsuspend_mask(&set, arg4, arg5);
1567             if (ret != 0) {
1568                 unlock_user(target_pfd, arg1, 0);
1569                 return ret;
1570             }
1571         }
1572 
1573         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1574                                    set, SIGSET_T_SIZE));
1575 
1576         if (set) {
1577             finish_sigsuspend_mask(ret);
1578         }
1579         if (!is_error(ret) && arg3) {
1580             if (time64) {
1581                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1582                     return -TARGET_EFAULT;
1583                 }
1584             } else {
1585                 if (host_to_target_timespec(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             }
1589         }
1590     } else {
1591         struct timespec ts, *pts;
1592 
1593         if (arg3 >= 0) {
1594             /* Convert milliseconds to seconds and nanoseconds */
1595             ts.tv_sec = arg3 / 1000;
1596             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1597             pts = &ts;
1598         } else {
1599             /* A negative poll() timeout means "infinite" */
1600             pts = NULL;
1601         }
1602         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1603     }
1604 
1605     if (!is_error(ret)) {
1606         for (i = 0; i < nfds; i++) {
1607             target_pfd[i].revents = tswap16(pfd[i].revents);
1608         }
1609     }
1610     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1611     return ret;
1612 }
1613 #endif
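
/*
 * Illustrative guest-side sketch of a call serviced by do_ppoll() above:
 * arg3 corresponds to &ts, and arg4/arg5 to the signal mask and its size.
 * The function name and values are invented for illustration only.
 */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <time.h>

static int wait_readable(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };    /* 1s timeout */
    sigset_t mask;

    sigemptyset(&mask);             /* block no extra signals while waiting */
    return ppoll(&pfd, 1, &ts, &mask);
}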
1614 
1615 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1616                         int flags, int is_pipe2)
1617 {
1618     int host_pipe[2];
1619     abi_long ret;
1620     ret = pipe2(host_pipe, flags);
1621 
1622     if (is_error(ret))
1623         return get_errno(ret);
1624 
1625     /* Several targets have special calling conventions for the original
1626        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1627     if (!is_pipe2) {
1628 #if defined(TARGET_ALPHA)
1629         cpu_env->ir[IR_A4] = host_pipe[1];
1630         return host_pipe[0];
1631 #elif defined(TARGET_MIPS)
1632         cpu_env->active_tc.gpr[3] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_SH4)
1635         cpu_env->gregs[1] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SPARC)
1638         cpu_env->regwptr[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #endif
1641     }
1642 
1643     if (put_user_s32(host_pipe[0], pipedes)
1644         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1645         return -TARGET_EFAULT;
1646     return get_errno(ret);
1647 }
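
/*
 * Illustrative guest-side sketch: the pipedes array that do_pipe() fills
 * with put_user_s32() on the generic path (targets without the special
 * register-pair convention noted above).  Names are invented.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int make_nonblocking_pipe(int fds[2])
{
    /* fds[0] becomes the read end, fds[1] the write end */
    return pipe2(fds, O_NONBLOCK);
}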
1648 
1649 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1650                                                abi_ulong target_addr,
1651                                                socklen_t len)
1652 {
1653     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1654     sa_family_t sa_family;
1655     struct target_sockaddr *target_saddr;
1656 
1657     if (fd_trans_target_to_host_addr(fd)) {
1658         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1659     }
1660 
1661     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1662     if (!target_saddr)
1663         return -TARGET_EFAULT;
1664 
1665     sa_family = tswap16(target_saddr->sa_family);
1666 
1667     /* Oops. The caller might send an incomplete sun_path; sun_path
1668      * must be terminated by \0 (see the manual page), but
1669      * unfortunately it is quite common to specify the sockaddr_un
1670      * length as "strlen(x->sun_path)" when it should be
1671      * "strlen(...) + 1". We'll fix that here if needed.
1672      * The Linux kernel applies a similar fix-up.
1673      */
1674 
1675     if (sa_family == AF_UNIX) {
1676         if (len < unix_maxlen && len > 0) {
1677             char *cp = (char *)target_saddr;
1678 
1679             if (cp[len - 1] && !cp[len])
1680                 len++;
1681         }
1682         if (len > unix_maxlen)
1683             len = unix_maxlen;
1684     }
1685 
1686     memcpy(addr, target_saddr, len);
1687     addr->sa_family = sa_family;
1688     if (sa_family == AF_NETLINK) {
1689         struct sockaddr_nl *nladdr;
1690 
1691         nladdr = (struct sockaddr_nl *)addr;
1692         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1693         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1694     } else if (sa_family == AF_PACKET) {
1695         struct target_sockaddr_ll *lladdr;
1696 
1697         lladdr = (struct target_sockaddr_ll *)addr;
1698         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1699         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1700     } else if (sa_family == AF_INET6) {
1701         struct sockaddr_in6 *in6addr;
1702 
1703         in6addr = (struct sockaddr_in6 *)addr;
1704         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1705     }
1706     unlock_user(target_saddr, target_addr, 0);
1707 
1708     return 0;
1709 }
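
/*
 * Illustrative guest-side sketch of the AF_UNIX length convention the
 * fix-up above tolerates: the portable form adds 1 for the terminating
 * \0, but many callers pass only strlen(sun_path).  The function name is
 * invented for illustration.
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_unix(int fd, const char *path)
{
    struct sockaddr_un sa = { .sun_family = AF_UNIX };
    socklen_t len;

    strncpy(sa.sun_path, path, sizeof(sa.sun_path) - 1);
    /* Correct form; omitting the "+ 1" is the common mistake fixed above. */
    len = offsetof(struct sockaddr_un, sun_path) + strlen(sa.sun_path) + 1;
    return bind(fd, (struct sockaddr *)&sa, len);
}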
1710 
1711 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1712                                                struct sockaddr *addr,
1713                                                socklen_t len)
1714 {
1715     struct target_sockaddr *target_saddr;
1716 
1717     if (len == 0) {
1718         return 0;
1719     }
1720     assert(addr);
1721 
1722     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1723     if (!target_saddr)
1724         return -TARGET_EFAULT;
1725     memcpy(target_saddr, addr, len);
1726     if (len >= offsetof(struct target_sockaddr, sa_family) +
1727         sizeof(target_saddr->sa_family)) {
1728         target_saddr->sa_family = tswap16(addr->sa_family);
1729     }
1730     if (addr->sa_family == AF_NETLINK &&
1731         len >= sizeof(struct target_sockaddr_nl)) {
1732         struct target_sockaddr_nl *target_nl =
1733                (struct target_sockaddr_nl *)target_saddr;
1734         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1735         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1736     } else if (addr->sa_family == AF_PACKET) {
1737         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1738         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1739         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1740     } else if (addr->sa_family == AF_INET6 &&
1741                len >= sizeof(struct target_sockaddr_in6)) {
1742         struct target_sockaddr_in6 *target_in6 =
1743                (struct target_sockaddr_in6 *)target_saddr;
1744         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1745     }
1746     unlock_user(target_saddr, target_addr, len);
1747 
1748     return 0;
1749 }
1750 
1751 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1752                                            struct target_msghdr *target_msgh)
1753 {
1754     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1755     abi_long msg_controllen;
1756     abi_ulong target_cmsg_addr;
1757     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1758     socklen_t space = 0;
1759 
1760     msg_controllen = tswapal(target_msgh->msg_controllen);
1761     if (msg_controllen < sizeof (struct target_cmsghdr))
1762         goto the_end;
1763     target_cmsg_addr = tswapal(target_msgh->msg_control);
1764     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1765     target_cmsg_start = target_cmsg;
1766     if (!target_cmsg)
1767         return -TARGET_EFAULT;
1768 
1769     while (cmsg && target_cmsg) {
1770         void *data = CMSG_DATA(cmsg);
1771         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1772 
1773         int len = tswapal(target_cmsg->cmsg_len)
1774             - sizeof(struct target_cmsghdr);
1775 
1776         space += CMSG_SPACE(len);
1777         if (space > msgh->msg_controllen) {
1778             space -= CMSG_SPACE(len);
1779             /* This is a QEMU bug, since we allocated the payload
1780              * area ourselves (unlike overflow in host-to-target
1781              * conversion, which is just the guest giving us a buffer
1782              * that's too small). It can't happen for the payload types
1783              * we currently support; if it becomes an issue in future
1784              * we would need to improve our allocation strategy to
1785              * something more intelligent than "twice the size of the
1786              * target buffer we're reading from".
1787              */
1788             qemu_log_mask(LOG_UNIMP,
1789                           ("Unsupported ancillary data %d/%d: "
1790                            "unhandled msg size\n"),
1791                           tswap32(target_cmsg->cmsg_level),
1792                           tswap32(target_cmsg->cmsg_type));
1793             break;
1794         }
1795 
1796         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1797             cmsg->cmsg_level = SOL_SOCKET;
1798         } else {
1799             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1800         }
1801         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1802         cmsg->cmsg_len = CMSG_LEN(len);
1803 
1804         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1805             int *fd = (int *)data;
1806             int *target_fd = (int *)target_data;
1807             int i, numfds = len / sizeof(int);
1808 
1809             for (i = 0; i < numfds; i++) {
1810                 __get_user(fd[i], target_fd + i);
1811             }
1812         } else if (cmsg->cmsg_level == SOL_SOCKET
1813                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1814             struct ucred *cred = (struct ucred *)data;
1815             struct target_ucred *target_cred =
1816                 (struct target_ucred *)target_data;
1817 
1818             __get_user(cred->pid, &target_cred->pid);
1819             __get_user(cred->uid, &target_cred->uid);
1820             __get_user(cred->gid, &target_cred->gid);
1821         } else if (cmsg->cmsg_level == SOL_ALG) {
1822             uint32_t *dst = (uint32_t *)data;
1823 
1824             memcpy(dst, target_data, len);
1825             /* fix endianness of first 32-bit word */
1826             if (len >= sizeof(uint32_t)) {
1827                 *dst = tswap32(*dst);
1828             }
1829         } else {
1830             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1831                           cmsg->cmsg_level, cmsg->cmsg_type);
1832             memcpy(data, target_data, len);
1833         }
1834 
1835         cmsg = CMSG_NXTHDR(msgh, cmsg);
1836         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1837                                          target_cmsg_start);
1838     }
1839     unlock_user(target_cmsg, target_cmsg_addr, 0);
1840  the_end:
1841     msgh->msg_controllen = space;
1842     return 0;
1843 }
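
/*
 * Illustrative guest-side sketch of the SCM_RIGHTS case converted above:
 * passing one file descriptor as ancillary data, whose int payload
 * target_to_host_cmsg() copies element by element with __get_user().
 * Names are invented for illustration.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_fd(int sock, int fd_to_pass)
{
    char byte = 0;
    struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } ctrl;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
    };
    struct cmsghdr *cmsg;

    memset(&ctrl, 0, sizeof(ctrl));
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
    return sendmsg(sock, &msg, 0);
}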
1844 
1845 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1846                                            struct msghdr *msgh)
1847 {
1848     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1849     abi_long msg_controllen;
1850     abi_ulong target_cmsg_addr;
1851     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1852     socklen_t space = 0;
1853 
1854     msg_controllen = tswapal(target_msgh->msg_controllen);
1855     if (msg_controllen < sizeof (struct target_cmsghdr))
1856         goto the_end;
1857     target_cmsg_addr = tswapal(target_msgh->msg_control);
1858     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1859     target_cmsg_start = target_cmsg;
1860     if (!target_cmsg)
1861         return -TARGET_EFAULT;
1862 
1863     while (cmsg && target_cmsg) {
1864         void *data = CMSG_DATA(cmsg);
1865         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1866 
1867         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1868         int tgt_len, tgt_space;
1869 
1870         /* We never copy a half-header but may copy half-data;
1871          * this is Linux's behaviour in put_cmsg(). Note that
1872          * truncation here is a guest problem (which we report
1873          * to the guest via the CTRUNC bit), unlike truncation
1874          * in target_to_host_cmsg, which is a QEMU bug.
1875          */
1876         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1877             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1878             break;
1879         }
1880 
1881         if (cmsg->cmsg_level == SOL_SOCKET) {
1882             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1883         } else {
1884             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1885         }
1886         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1887 
1888         /* Payload types which need a different size of payload on
1889          * the target must adjust tgt_len here.
1890          */
1891         tgt_len = len;
1892         switch (cmsg->cmsg_level) {
1893         case SOL_SOCKET:
1894             switch (cmsg->cmsg_type) {
1895             case SO_TIMESTAMP:
1896                 tgt_len = sizeof(struct target_timeval);
1897                 break;
1898             default:
1899                 break;
1900             }
1901             break;
1902         default:
1903             break;
1904         }
1905 
1906         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1907             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1908             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1909         }
1910 
1911         /* We must now copy-and-convert len bytes of payload
1912          * into tgt_len bytes of destination space. Bear in mind
1913          * that in both source and destination we may be dealing
1914          * with a truncated value!
1915          */
1916         switch (cmsg->cmsg_level) {
1917         case SOL_SOCKET:
1918             switch (cmsg->cmsg_type) {
1919             case SCM_RIGHTS:
1920             {
1921                 int *fd = (int *)data;
1922                 int *target_fd = (int *)target_data;
1923                 int i, numfds = tgt_len / sizeof(int);
1924 
1925                 for (i = 0; i < numfds; i++) {
1926                     __put_user(fd[i], target_fd + i);
1927                 }
1928                 break;
1929             }
1930             case SO_TIMESTAMP:
1931             {
1932                 struct timeval *tv = (struct timeval *)data;
1933                 struct target_timeval *target_tv =
1934                     (struct target_timeval *)target_data;
1935 
1936                 if (len != sizeof(struct timeval) ||
1937                     tgt_len != sizeof(struct target_timeval)) {
1938                     goto unimplemented;
1939                 }
1940 
1941                 /* copy struct timeval to target */
1942                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1943                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1944                 break;
1945             }
1946             case SCM_CREDENTIALS:
1947             {
1948                 struct ucred *cred = (struct ucred *)data;
1949                 struct target_ucred *target_cred =
1950                     (struct target_ucred *)target_data;
1951 
1952                 __put_user(cred->pid, &target_cred->pid);
1953                 __put_user(cred->uid, &target_cred->uid);
1954                 __put_user(cred->gid, &target_cred->gid);
1955                 break;
1956             }
1957             default:
1958                 goto unimplemented;
1959             }
1960             break;
1961 
1962         case SOL_IP:
1963             switch (cmsg->cmsg_type) {
1964             case IP_TTL:
1965             {
1966                 uint32_t *v = (uint32_t *)data;
1967                 uint32_t *t_int = (uint32_t *)target_data;
1968 
1969                 if (len != sizeof(uint32_t) ||
1970                     tgt_len != sizeof(uint32_t)) {
1971                     goto unimplemented;
1972                 }
1973                 __put_user(*v, t_int);
1974                 break;
1975             }
1976             case IP_RECVERR:
1977             {
1978                 struct errhdr_t {
1979                    struct sock_extended_err ee;
1980                    struct sockaddr_in offender;
1981                 };
1982                 struct errhdr_t *errh = (struct errhdr_t *)data;
1983                 struct errhdr_t *target_errh =
1984                     (struct errhdr_t *)target_data;
1985 
1986                 if (len != sizeof(struct errhdr_t) ||
1987                     tgt_len != sizeof(struct errhdr_t)) {
1988                     goto unimplemented;
1989                 }
1990                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1991                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1992                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1993                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1994                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1995                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1996                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1997                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1998                     (void *) &errh->offender, sizeof(errh->offender));
1999                 break;
2000             }
2001             default:
2002                 goto unimplemented;
2003             }
2004             break;
2005 
2006         case SOL_IPV6:
2007             switch (cmsg->cmsg_type) {
2008             case IPV6_HOPLIMIT:
2009             {
2010                 uint32_t *v = (uint32_t *)data;
2011                 uint32_t *t_int = (uint32_t *)target_data;
2012 
2013                 if (len != sizeof(uint32_t) ||
2014                     tgt_len != sizeof(uint32_t)) {
2015                     goto unimplemented;
2016                 }
2017                 __put_user(*v, t_int);
2018                 break;
2019             }
2020             case IPV6_RECVERR:
2021             {
2022                 struct errhdr6_t {
2023                    struct sock_extended_err ee;
2024                    struct sockaddr_in6 offender;
2025                 };
2026                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2027                 struct errhdr6_t *target_errh =
2028                     (struct errhdr6_t *)target_data;
2029 
2030                 if (len != sizeof(struct errhdr6_t) ||
2031                     tgt_len != sizeof(struct errhdr6_t)) {
2032                     goto unimplemented;
2033                 }
2034                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2035                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2036                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2037                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2038                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2039                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2040                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2041                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2042                     (void *) &errh->offender, sizeof(errh->offender));
2043                 break;
2044             }
2045             default:
2046                 goto unimplemented;
2047             }
2048             break;
2049 
2050         default:
2051         unimplemented:
2052             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2053                           cmsg->cmsg_level, cmsg->cmsg_type);
2054             memcpy(target_data, data, MIN(len, tgt_len));
2055             if (tgt_len > len) {
2056                 memset(target_data + len, 0, tgt_len - len);
2057             }
2058         }
2059 
2060         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2061         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2062         if (msg_controllen < tgt_space) {
2063             tgt_space = msg_controllen;
2064         }
2065         msg_controllen -= tgt_space;
2066         space += tgt_space;
2067         cmsg = CMSG_NXTHDR(msgh, cmsg);
2068         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2069                                          target_cmsg_start);
2070     }
2071     unlock_user(target_cmsg, target_cmsg_addr, space);
2072  the_end:
2073     target_msgh->msg_controllen = tswapal(space);
2074     return 0;
2075 }
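
/*
 * Illustrative guest-side sketch of the SO_TIMESTAMP case handled above,
 * where the struct timeval payload is converted to struct target_timeval.
 * Assumes SO_TIMESTAMP was previously enabled with setsockopt(); names are
 * invented for illustration.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t recv_with_timestamp(int sock, void *buf, size_t len,
                                   struct timeval *stamp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    char ctrl[CMSG_SPACE(sizeof(struct timeval))];
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
    };
    ssize_t n = recvmsg(sock, &msg, 0);

    if (n < 0) {
        return n;
    }
    for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c != NULL;
         c = CMSG_NXTHDR(&msg, c)) {
        if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_TIMESTAMP) {
            memcpy(stamp, CMSG_DATA(c), sizeof(*stamp));
        }
    }
    return n;
}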
2076 
2077 /* do_setsockopt() Must return target values and target errnos. */
2078 static abi_long do_setsockopt(int sockfd, int level, int optname,
2079                               abi_ulong optval_addr, socklen_t optlen)
2080 {
2081     abi_long ret;
2082     int val;
2083 
2084     switch(level) {
2085     case SOL_TCP:
2086     case SOL_UDP:
2087         /* TCP and UDP options all take an 'int' value.  */
2088         if (optlen < sizeof(uint32_t))
2089             return -TARGET_EINVAL;
2090 
2091         if (get_user_u32(val, optval_addr))
2092             return -TARGET_EFAULT;
2093         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2094         break;
2095     case SOL_IP:
2096         switch(optname) {
2097         case IP_TOS:
2098         case IP_TTL:
2099         case IP_HDRINCL:
2100         case IP_ROUTER_ALERT:
2101         case IP_RECVOPTS:
2102         case IP_RETOPTS:
2103         case IP_PKTINFO:
2104         case IP_MTU_DISCOVER:
2105         case IP_RECVERR:
2106         case IP_RECVTTL:
2107         case IP_RECVTOS:
2108 #ifdef IP_FREEBIND
2109         case IP_FREEBIND:
2110 #endif
2111         case IP_MULTICAST_TTL:
2112         case IP_MULTICAST_LOOP:
2113             val = 0;
2114             if (optlen >= sizeof(uint32_t)) {
2115                 if (get_user_u32(val, optval_addr))
2116                     return -TARGET_EFAULT;
2117             } else if (optlen >= 1) {
2118                 if (get_user_u8(val, optval_addr))
2119                     return -TARGET_EFAULT;
2120             }
2121             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2122             break;
2123         case IP_ADD_MEMBERSHIP:
2124         case IP_DROP_MEMBERSHIP:
2125         {
2126             struct ip_mreqn ip_mreq;
2127             struct target_ip_mreqn *target_smreqn;
2128 
2129             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2130                               sizeof(struct target_ip_mreq));
2131 
2132             if (optlen < sizeof (struct target_ip_mreq) ||
2133                 optlen > sizeof (struct target_ip_mreqn)) {
2134                 return -TARGET_EINVAL;
2135             }
2136 
2137             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2138             if (!target_smreqn) {
2139                 return -TARGET_EFAULT;
2140             }
2141             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2142             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2143             if (optlen == sizeof(struct target_ip_mreqn)) {
2144                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2145                 optlen = sizeof(struct ip_mreqn);
2146             }
2147             unlock_user(target_smreqn, optval_addr, 0);
2148 
2149             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2150             break;
2151         }
2152         case IP_BLOCK_SOURCE:
2153         case IP_UNBLOCK_SOURCE:
2154         case IP_ADD_SOURCE_MEMBERSHIP:
2155         case IP_DROP_SOURCE_MEMBERSHIP:
2156         {
2157             struct ip_mreq_source *ip_mreq_source;
2158 
2159             if (optlen != sizeof (struct target_ip_mreq_source))
2160                 return -TARGET_EINVAL;
2161 
2162             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2163             if (!ip_mreq_source) {
2164                 return -TARGET_EFAULT;
2165             }
2166             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2167             unlock_user(ip_mreq_source, optval_addr, 0);
2168             break;
2169         }
2170         default:
2171             goto unimplemented;
2172         }
2173         break;
2174     case SOL_IPV6:
2175         switch (optname) {
2176         case IPV6_MTU_DISCOVER:
2177         case IPV6_MTU:
2178         case IPV6_V6ONLY:
2179         case IPV6_RECVPKTINFO:
2180         case IPV6_UNICAST_HOPS:
2181         case IPV6_MULTICAST_HOPS:
2182         case IPV6_MULTICAST_LOOP:
2183         case IPV6_RECVERR:
2184         case IPV6_RECVHOPLIMIT:
2185         case IPV6_2292HOPLIMIT:
2186         case IPV6_CHECKSUM:
2187         case IPV6_ADDRFORM:
2188         case IPV6_2292PKTINFO:
2189         case IPV6_RECVTCLASS:
2190         case IPV6_RECVRTHDR:
2191         case IPV6_2292RTHDR:
2192         case IPV6_RECVHOPOPTS:
2193         case IPV6_2292HOPOPTS:
2194         case IPV6_RECVDSTOPTS:
2195         case IPV6_2292DSTOPTS:
2196         case IPV6_TCLASS:
2197         case IPV6_ADDR_PREFERENCES:
2198 #ifdef IPV6_RECVPATHMTU
2199         case IPV6_RECVPATHMTU:
2200 #endif
2201 #ifdef IPV6_TRANSPARENT
2202         case IPV6_TRANSPARENT:
2203 #endif
2204 #ifdef IPV6_FREEBIND
2205         case IPV6_FREEBIND:
2206 #endif
2207 #ifdef IPV6_RECVORIGDSTADDR
2208         case IPV6_RECVORIGDSTADDR:
2209 #endif
2210             val = 0;
2211             if (optlen < sizeof(uint32_t)) {
2212                 return -TARGET_EINVAL;
2213             }
2214             if (get_user_u32(val, optval_addr)) {
2215                 return -TARGET_EFAULT;
2216             }
2217             ret = get_errno(setsockopt(sockfd, level, optname,
2218                                        &val, sizeof(val)));
2219             break;
2220         case IPV6_PKTINFO:
2221         {
2222             struct in6_pktinfo pki;
2223 
2224             if (optlen < sizeof(pki)) {
2225                 return -TARGET_EINVAL;
2226             }
2227 
2228             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2229                 return -TARGET_EFAULT;
2230             }
2231 
2232             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2233 
2234             ret = get_errno(setsockopt(sockfd, level, optname,
2235                                        &pki, sizeof(pki)));
2236             break;
2237         }
2238         case IPV6_ADD_MEMBERSHIP:
2239         case IPV6_DROP_MEMBERSHIP:
2240         {
2241             struct ipv6_mreq ipv6mreq;
2242 
2243             if (optlen < sizeof(ipv6mreq)) {
2244                 return -TARGET_EINVAL;
2245             }
2246 
2247             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2248                 return -TARGET_EFAULT;
2249             }
2250 
2251             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2252 
2253             ret = get_errno(setsockopt(sockfd, level, optname,
2254                                        &ipv6mreq, sizeof(ipv6mreq)));
2255             break;
2256         }
2257         default:
2258             goto unimplemented;
2259         }
2260         break;
2261     case SOL_ICMPV6:
2262         switch (optname) {
2263         case ICMPV6_FILTER:
2264         {
2265             struct icmp6_filter icmp6f;
2266 
2267             if (optlen > sizeof(icmp6f)) {
2268                 optlen = sizeof(icmp6f);
2269             }
2270 
2271             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2272                 return -TARGET_EFAULT;
2273             }
2274 
2275             for (val = 0; val < 8; val++) {
2276                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2277             }
2278 
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        &icmp6f, optlen));
2281             break;
2282         }
2283         default:
2284             goto unimplemented;
2285         }
2286         break;
2287     case SOL_RAW:
2288         switch (optname) {
2289         case ICMP_FILTER:
2290         case IPV6_CHECKSUM:
2291             /* these take a u32 value */
2292             if (optlen < sizeof(uint32_t)) {
2293                 return -TARGET_EINVAL;
2294             }
2295 
2296             if (get_user_u32(val, optval_addr)) {
2297                 return -TARGET_EFAULT;
2298             }
2299             ret = get_errno(setsockopt(sockfd, level, optname,
2300                                        &val, sizeof(val)));
2301             break;
2302 
2303         default:
2304             goto unimplemented;
2305         }
2306         break;
2307 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2308     case SOL_ALG:
2309         switch (optname) {
2310         case ALG_SET_KEY:
2311         {
2312             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2313             if (!alg_key) {
2314                 return -TARGET_EFAULT;
2315             }
2316             ret = get_errno(setsockopt(sockfd, level, optname,
2317                                        alg_key, optlen));
2318             unlock_user(alg_key, optval_addr, optlen);
2319             break;
2320         }
2321         case ALG_SET_AEAD_AUTHSIZE:
2322         {
2323             ret = get_errno(setsockopt(sockfd, level, optname,
2324                                        NULL, optlen));
2325             break;
2326         }
2327         default:
2328             goto unimplemented;
2329         }
2330         break;
2331 #endif
2332     case TARGET_SOL_SOCKET:
2333         switch (optname) {
2334         case TARGET_SO_RCVTIMEO:
2335         case TARGET_SO_SNDTIMEO:
2336         {
2337                 struct timeval tv;
2338 
2339                 if (optlen != sizeof(struct target_timeval)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342 
2343                 if (copy_from_user_timeval(&tv, optval_addr)) {
2344                     return -TARGET_EFAULT;
2345                 }
2346 
2347                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2348                                 optname == TARGET_SO_RCVTIMEO ?
2349                                     SO_RCVTIMEO : SO_SNDTIMEO,
2350                                 &tv, sizeof(tv)));
2351                 return ret;
2352         }
2353         case TARGET_SO_ATTACH_FILTER:
2354         {
2355                 struct target_sock_fprog *tfprog;
2356                 struct target_sock_filter *tfilter;
2357                 struct sock_fprog fprog;
2358                 struct sock_filter *filter;
2359                 int i;
2360 
2361                 if (optlen != sizeof(*tfprog)) {
2362                     return -TARGET_EINVAL;
2363                 }
2364                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2365                     return -TARGET_EFAULT;
2366                 }
2367                 if (!lock_user_struct(VERIFY_READ, tfilter,
2368                                       tswapal(tfprog->filter), 0)) {
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_EFAULT;
2371                 }
2372 
2373                 fprog.len = tswap16(tfprog->len);
2374                 filter = g_try_new(struct sock_filter, fprog.len);
2375                 if (filter == NULL) {
2376                     unlock_user_struct(tfilter, tfprog->filter, 1);
2377                     unlock_user_struct(tfprog, optval_addr, 1);
2378                     return -TARGET_ENOMEM;
2379                 }
2380                 for (i = 0; i < fprog.len; i++) {
2381                     filter[i].code = tswap16(tfilter[i].code);
2382                     filter[i].jt = tfilter[i].jt;
2383                     filter[i].jf = tfilter[i].jf;
2384                     filter[i].k = tswap32(tfilter[i].k);
2385                 }
2386                 fprog.filter = filter;
2387 
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2389                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2390                 g_free(filter);
2391 
2392                 unlock_user_struct(tfilter, tfprog->filter, 1);
2393                 unlock_user_struct(tfprog, optval_addr, 1);
2394                 return ret;
2395         }
2396         case TARGET_SO_BINDTODEVICE:
2397         {
2398                 char *dev_ifname, *addr_ifname;
2399 
2400                 if (optlen > IFNAMSIZ - 1) {
2401                     optlen = IFNAMSIZ - 1;
2402                 }
2403                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2404                 if (!dev_ifname) {
2405                     return -TARGET_EFAULT;
2406                 }
2407                 optname = SO_BINDTODEVICE;
2408                 addr_ifname = alloca(IFNAMSIZ);
2409                 memcpy(addr_ifname, dev_ifname, optlen);
2410                 addr_ifname[optlen] = 0;
2411                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2412                                            addr_ifname, optlen));
2413                 unlock_user(dev_ifname, optval_addr, 0);
2414                 return ret;
2415         }
2416         case TARGET_SO_LINGER:
2417         {
2418                 struct linger lg;
2419                 struct target_linger *tlg;
2420 
2421                 if (optlen != sizeof(struct target_linger)) {
2422                     return -TARGET_EINVAL;
2423                 }
2424                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2425                     return -TARGET_EFAULT;
2426                 }
2427                 __get_user(lg.l_onoff, &tlg->l_onoff);
2428                 __get_user(lg.l_linger, &tlg->l_linger);
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2430                                 &lg, sizeof(lg)));
2431                 unlock_user_struct(tlg, optval_addr, 0);
2432                 return ret;
2433         }
2434             /* Options with 'int' argument.  */
2435         case TARGET_SO_DEBUG:
2436                 optname = SO_DEBUG;
2437                 break;
2438         case TARGET_SO_REUSEADDR:
2439                 optname = SO_REUSEADDR;
2440                 break;
2441 #ifdef SO_REUSEPORT
2442         case TARGET_SO_REUSEPORT:
2443                 optname = SO_REUSEPORT;
2444                 break;
2445 #endif
2446         case TARGET_SO_TYPE:
2447                 optname = SO_TYPE;
2448                 break;
2449         case TARGET_SO_ERROR:
2450                 optname = SO_ERROR;
2451                 break;
2452         case TARGET_SO_DONTROUTE:
2453                 optname = SO_DONTROUTE;
2454                 break;
2455         case TARGET_SO_BROADCAST:
2456                 optname = SO_BROADCAST;
2457                 break;
2458         case TARGET_SO_SNDBUF:
2459                 optname = SO_SNDBUF;
2460                 break;
2461         case TARGET_SO_SNDBUFFORCE:
2462                 optname = SO_SNDBUFFORCE;
2463                 break;
2464         case TARGET_SO_RCVBUF:
2465                 optname = SO_RCVBUF;
2466                 break;
2467         case TARGET_SO_RCVBUFFORCE:
2468                 optname = SO_RCVBUFFORCE;
2469                 break;
2470         case TARGET_SO_KEEPALIVE:
2471                 optname = SO_KEEPALIVE;
2472                 break;
2473         case TARGET_SO_OOBINLINE:
2474                 optname = SO_OOBINLINE;
2475                 break;
2476         case TARGET_SO_NO_CHECK:
2477                 optname = SO_NO_CHECK;
2478                 break;
2479         case TARGET_SO_PRIORITY:
2480                 optname = SO_PRIORITY;
2481                 break;
2482 #ifdef SO_BSDCOMPAT
2483         case TARGET_SO_BSDCOMPAT:
2484                 optname = SO_BSDCOMPAT;
2485                 break;
2486 #endif
2487         case TARGET_SO_PASSCRED:
2488                 optname = SO_PASSCRED;
2489                 break;
2490         case TARGET_SO_PASSSEC:
2491                 optname = SO_PASSSEC;
2492                 break;
2493         case TARGET_SO_TIMESTAMP:
2494                 optname = SO_TIMESTAMP;
2495                 break;
2496         case TARGET_SO_RCVLOWAT:
2497                 optname = SO_RCVLOWAT;
2498                 break;
2499         default:
2500             goto unimplemented;
2501         }
2502         if (optlen < sizeof(uint32_t))
2503             return -TARGET_EINVAL;
2504 
2505         if (get_user_u32(val, optval_addr))
2506             return -TARGET_EFAULT;
2507         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2508         break;
2509 #ifdef SOL_NETLINK
2510     case SOL_NETLINK:
2511         switch (optname) {
2512         case NETLINK_PKTINFO:
2513         case NETLINK_ADD_MEMBERSHIP:
2514         case NETLINK_DROP_MEMBERSHIP:
2515         case NETLINK_BROADCAST_ERROR:
2516         case NETLINK_NO_ENOBUFS:
2517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2518         case NETLINK_LISTEN_ALL_NSID:
2519         case NETLINK_CAP_ACK:
2520 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2521 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2522         case NETLINK_EXT_ACK:
2523 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2525         case NETLINK_GET_STRICT_CHK:
2526 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2527             break;
2528         default:
2529             goto unimplemented;
2530         }
2531         val = 0;
2532         if (optlen < sizeof(uint32_t)) {
2533             return -TARGET_EINVAL;
2534         }
2535         if (get_user_u32(val, optval_addr)) {
2536             return -TARGET_EFAULT;
2537         }
2538         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2539                                    sizeof(val)));
2540         break;
2541 #endif /* SOL_NETLINK */
2542     default:
2543     unimplemented:
2544         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2545                       level, optname);
2546         ret = -TARGET_ENOPROTOOPT;
2547     }
2548     return ret;
2549 }
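
/*
 * Illustrative guest-side sketch of the TARGET_SO_RCVTIMEO path above: the
 * option value is a struct timeval, copied in with copy_from_user_timeval()
 * before the host setsockopt() call.  Names are invented for illustration.
 */
#include <sys/socket.h>
#include <sys/time.h>

static int set_recv_timeout(int sock, time_t seconds)
{
    struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };

    return setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}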
2550 
2551 /* do_getsockopt() Must return target values and target errnos. */
2552 static abi_long do_getsockopt(int sockfd, int level, int optname,
2553                               abi_ulong optval_addr, abi_ulong optlen)
2554 {
2555     abi_long ret;
2556     int len, val;
2557     socklen_t lv;
2558 
2559     switch(level) {
2560     case TARGET_SOL_SOCKET:
2561         level = SOL_SOCKET;
2562         switch (optname) {
2563         /* These don't just return a single integer */
2564         case TARGET_SO_PEERNAME:
2565             goto unimplemented;
2566         case TARGET_SO_RCVTIMEO: {
2567             struct timeval tv;
2568             socklen_t tvlen;
2569 
2570             optname = SO_RCVTIMEO;
2571 
2572 get_timeout:
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             tvlen = sizeof(tv);
2581             ret = get_errno(getsockopt(sockfd, level, optname,
2582                                        &tv, &tvlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > sizeof(struct target_timeval)) {
2587                 len = sizeof(struct target_timeval);
2588             }
2589             if (copy_to_user_timeval(optval_addr, &tv)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             if (put_user_u32(len, optlen)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             break;
2596         }
2597         case TARGET_SO_SNDTIMEO:
2598             optname = SO_SNDTIMEO;
2599             goto get_timeout;
2600         case TARGET_SO_PEERCRED: {
2601             struct ucred cr;
2602             socklen_t crlen;
2603             struct target_ucred *tcr;
2604 
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611 
2612             crlen = sizeof(cr);
2613             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2614                                        &cr, &crlen));
2615             if (ret < 0) {
2616                 return ret;
2617             }
2618             if (len > crlen) {
2619                 len = crlen;
2620             }
2621             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2622                 return -TARGET_EFAULT;
2623             }
2624             __put_user(cr.pid, &tcr->pid);
2625             __put_user(cr.uid, &tcr->uid);
2626             __put_user(cr.gid, &tcr->gid);
2627             unlock_user_struct(tcr, optval_addr, 1);
2628             if (put_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             break;
2632         }
2633         case TARGET_SO_PEERSEC: {
2634             char *name;
2635 
2636             if (get_user_u32(len, optlen)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             if (len < 0) {
2640                 return -TARGET_EINVAL;
2641             }
2642             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2643             if (!name) {
2644                 return -TARGET_EFAULT;
2645             }
2646             lv = len;
2647             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2648                                        name, &lv));
2649             if (put_user_u32(lv, optlen)) {
2650                 ret = -TARGET_EFAULT;
2651             }
2652             unlock_user(name, optval_addr, lv);
2653             break;
2654         }
2655         case TARGET_SO_LINGER:
2656         {
2657             struct linger lg;
2658             socklen_t lglen;
2659             struct target_linger *tlg;
2660 
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667 
2668             lglen = sizeof(lg);
2669             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2670                                        &lg, &lglen));
2671             if (ret < 0) {
2672                 return ret;
2673             }
2674             if (len > lglen) {
2675                 len = lglen;
2676             }
2677             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             __put_user(lg.l_onoff, &tlg->l_onoff);
2681             __put_user(lg.l_linger, &tlg->l_linger);
2682             unlock_user_struct(tlg, optval_addr, 1);
2683             if (put_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             break;
2687         }
2688         /* Options with 'int' argument.  */
2689         case TARGET_SO_DEBUG:
2690             optname = SO_DEBUG;
2691             goto int_case;
2692         case TARGET_SO_REUSEADDR:
2693             optname = SO_REUSEADDR;
2694             goto int_case;
2695 #ifdef SO_REUSEPORT
2696         case TARGET_SO_REUSEPORT:
2697             optname = SO_REUSEPORT;
2698             goto int_case;
2699 #endif
2700         case TARGET_SO_TYPE:
2701             optname = SO_TYPE;
2702             goto int_case;
2703         case TARGET_SO_ERROR:
2704             optname = SO_ERROR;
2705             goto int_case;
2706         case TARGET_SO_DONTROUTE:
2707             optname = SO_DONTROUTE;
2708             goto int_case;
2709         case TARGET_SO_BROADCAST:
2710             optname = SO_BROADCAST;
2711             goto int_case;
2712         case TARGET_SO_SNDBUF:
2713             optname = SO_SNDBUF;
2714             goto int_case;
2715         case TARGET_SO_RCVBUF:
2716             optname = SO_RCVBUF;
2717             goto int_case;
2718         case TARGET_SO_KEEPALIVE:
2719             optname = SO_KEEPALIVE;
2720             goto int_case;
2721         case TARGET_SO_OOBINLINE:
2722             optname = SO_OOBINLINE;
2723             goto int_case;
2724         case TARGET_SO_NO_CHECK:
2725             optname = SO_NO_CHECK;
2726             goto int_case;
2727         case TARGET_SO_PRIORITY:
2728             optname = SO_PRIORITY;
2729             goto int_case;
2730 #ifdef SO_BSDCOMPAT
2731         case TARGET_SO_BSDCOMPAT:
2732             optname = SO_BSDCOMPAT;
2733             goto int_case;
2734 #endif
2735         case TARGET_SO_PASSCRED:
2736             optname = SO_PASSCRED;
2737             goto int_case;
2738         case TARGET_SO_TIMESTAMP:
2739             optname = SO_TIMESTAMP;
2740             goto int_case;
2741         case TARGET_SO_RCVLOWAT:
2742             optname = SO_RCVLOWAT;
2743             goto int_case;
2744         case TARGET_SO_ACCEPTCONN:
2745             optname = SO_ACCEPTCONN;
2746             goto int_case;
2747         case TARGET_SO_PROTOCOL:
2748             optname = SO_PROTOCOL;
2749             goto int_case;
2750         case TARGET_SO_DOMAIN:
2751             optname = SO_DOMAIN;
2752             goto int_case;
2753         default:
2754             goto int_case;
2755         }
2756         break;
2757     case SOL_TCP:
2758     case SOL_UDP:
2759         /* TCP and UDP options all take an 'int' value.  */
2760     int_case:
2761         if (get_user_u32(len, optlen))
2762             return -TARGET_EFAULT;
2763         if (len < 0)
2764             return -TARGET_EINVAL;
2765         lv = sizeof(lv);
2766         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2767         if (ret < 0)
2768             return ret;
2769         switch (optname) {
2770         case SO_TYPE:
2771             val = host_to_target_sock_type(val);
2772             break;
2773         case SO_ERROR:
2774             val = host_to_target_errno(val);
2775             break;
2776         }
2777         if (len > lv)
2778             len = lv;
2779         if (len == 4) {
2780             if (put_user_u32(val, optval_addr))
2781                 return -TARGET_EFAULT;
2782         } else {
2783             if (put_user_u8(val, optval_addr))
2784                 return -TARGET_EFAULT;
2785         }
2786         if (put_user_u32(len, optlen))
2787             return -TARGET_EFAULT;
2788         break;
2789     case SOL_IP:
2790         switch(optname) {
2791         case IP_TOS:
2792         case IP_TTL:
2793         case IP_HDRINCL:
2794         case IP_ROUTER_ALERT:
2795         case IP_RECVOPTS:
2796         case IP_RETOPTS:
2797         case IP_PKTINFO:
2798         case IP_MTU_DISCOVER:
2799         case IP_RECVERR:
2800         case IP_RECVTOS:
2801 #ifdef IP_FREEBIND
2802         case IP_FREEBIND:
2803 #endif
2804         case IP_MULTICAST_TTL:
2805         case IP_MULTICAST_LOOP:
2806             if (get_user_u32(len, optlen))
2807                 return -TARGET_EFAULT;
2808             if (len < 0)
2809                 return -TARGET_EINVAL;
2810             lv = sizeof(lv);
2811             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2812             if (ret < 0)
2813                 return ret;
2814             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2815                 len = 1;
2816                 if (put_user_u32(len, optlen)
2817                     || put_user_u8(val, optval_addr))
2818                     return -TARGET_EFAULT;
2819             } else {
2820                 if (len > sizeof(int))
2821                     len = sizeof(int);
2822                 if (put_user_u32(len, optlen)
2823                     || put_user_u32(val, optval_addr))
2824                     return -TARGET_EFAULT;
2825             }
2826             break;
2827         default:
2828             ret = -TARGET_ENOPROTOOPT;
2829             break;
2830         }
2831         break;
2832     case SOL_IPV6:
2833         switch (optname) {
2834         case IPV6_MTU_DISCOVER:
2835         case IPV6_MTU:
2836         case IPV6_V6ONLY:
2837         case IPV6_RECVPKTINFO:
2838         case IPV6_UNICAST_HOPS:
2839         case IPV6_MULTICAST_HOPS:
2840         case IPV6_MULTICAST_LOOP:
2841         case IPV6_RECVERR:
2842         case IPV6_RECVHOPLIMIT:
2843         case IPV6_2292HOPLIMIT:
2844         case IPV6_CHECKSUM:
2845         case IPV6_ADDRFORM:
2846         case IPV6_2292PKTINFO:
2847         case IPV6_RECVTCLASS:
2848         case IPV6_RECVRTHDR:
2849         case IPV6_2292RTHDR:
2850         case IPV6_RECVHOPOPTS:
2851         case IPV6_2292HOPOPTS:
2852         case IPV6_RECVDSTOPTS:
2853         case IPV6_2292DSTOPTS:
2854         case IPV6_TCLASS:
2855         case IPV6_ADDR_PREFERENCES:
2856 #ifdef IPV6_RECVPATHMTU
2857         case IPV6_RECVPATHMTU:
2858 #endif
2859 #ifdef IPV6_TRANSPARENT
2860         case IPV6_TRANSPARENT:
2861 #endif
2862 #ifdef IPV6_FREEBIND
2863         case IPV6_FREEBIND:
2864 #endif
2865 #ifdef IPV6_RECVORIGDSTADDR
2866         case IPV6_RECVORIGDSTADDR:
2867 #endif
2868             if (get_user_u32(len, optlen))
2869                 return -TARGET_EFAULT;
2870             if (len < 0)
2871                 return -TARGET_EINVAL;
2872             lv = sizeof(lv);
2873             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2874             if (ret < 0)
2875                 return ret;
2876             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2877                 len = 1;
2878                 if (put_user_u32(len, optlen)
2879                     || put_user_u8(val, optval_addr))
2880                     return -TARGET_EFAULT;
2881             } else {
2882                 if (len > sizeof(int))
2883                     len = sizeof(int);
2884                 if (put_user_u32(len, optlen)
2885                     || put_user_u32(val, optval_addr))
2886                     return -TARGET_EFAULT;
2887             }
2888             break;
2889         default:
2890             ret = -TARGET_ENOPROTOOPT;
2891             break;
2892         }
2893         break;
2894 #ifdef SOL_NETLINK
2895     case SOL_NETLINK:
2896         switch (optname) {
2897         case NETLINK_PKTINFO:
2898         case NETLINK_BROADCAST_ERROR:
2899         case NETLINK_NO_ENOBUFS:
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2901         case NETLINK_LISTEN_ALL_NSID:
2902         case NETLINK_CAP_ACK:
2903 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2904 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2905         case NETLINK_EXT_ACK:
2906 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2908         case NETLINK_GET_STRICT_CHK:
2909 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2910             if (get_user_u32(len, optlen)) {
2911                 return -TARGET_EFAULT;
2912             }
2913             if (len != sizeof(val)) {
2914                 return -TARGET_EINVAL;
2915             }
2916             lv = len;
2917             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2918             if (ret < 0) {
2919                 return ret;
2920             }
2921             if (put_user_u32(lv, optlen)
2922                 || put_user_u32(val, optval_addr)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             break;
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2927         case NETLINK_LIST_MEMBERSHIPS:
2928         {
2929             uint32_t *results;
2930             int i;
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len < 0) {
2935                 return -TARGET_EINVAL;
2936             }
2937             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2938             if (!results && len > 0) {
2939                 return -TARGET_EFAULT;
2940             }
2941             lv = len;
2942             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2943             if (ret < 0) {
2944                 unlock_user(results, optval_addr, 0);
2945                 return ret;
2946             }
2947             /* swap host endianness to target endianness. */
2948             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2949                 results[i] = tswap32(results[i]);
2950             }
2951             if (put_user_u32(lv, optlen)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             unlock_user(results, optval_addr, 0);
2955             break;
2956         }
2957 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2958         default:
2959             goto unimplemented;
2960         }
2961         break;
2962 #endif /* SOL_NETLINK */
2963     default:
2964     unimplemented:
2965         qemu_log_mask(LOG_UNIMP,
2966                       "getsockopt level=%d optname=%d not yet supported\n",
2967                       level, optname);
2968         ret = -TARGET_EOPNOTSUPP;
2969         break;
2970     }
2971     return ret;
2972 }
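
/*
 * Illustrative guest-side sketch of an 'int'-valued option routed through
 * the int_case label above; SO_ERROR additionally has its value mapped
 * with host_to_target_errno().  The function name is invented for
 * illustration.
 */
#include <sys/socket.h>

static int pending_socket_error(int sock)
{
    int err = 0;
    socklen_t optlen = sizeof(err);

    if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &optlen) < 0) {
        return -1;
    }
    return err;
}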
2973 
2974 /* Convert target low/high pair representing file offset into the host
2975  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2976  * as the kernel doesn't handle them either.
2977  */
2978 static void target_to_host_low_high(abi_ulong tlow,
2979                                     abi_ulong thigh,
2980                                     unsigned long *hlow,
2981                                     unsigned long *hhigh)
2982 {
2983     uint64_t off = tlow |
2984         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2985         TARGET_LONG_BITS / 2;
2986 
2987     *hlow = off;
2988     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2989 }
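
/*
 * Worked example for the conversion above (illustrative values): with a
 * 32-bit target (TARGET_LONG_BITS == 32) and a 64-bit host, tlow =
 * 0x89abcdef and thigh = 0x01234567 combine into off = 0x0123456789abcdef;
 * *hlow then receives the whole offset and *hhigh becomes 0.  Each shift is
 * split into two half-width steps so that no value is ever shifted by its
 * full bit width, which C leaves undefined when target or host long is
 * 64 bits.
 */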
2990 
2991 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2992                                 abi_ulong count, int copy)
2993 {
2994     struct target_iovec *target_vec;
2995     struct iovec *vec;
2996     abi_ulong total_len, max_len;
2997     int i;
2998     int err = 0;
2999     bool bad_address = false;
3000 
3001     if (count == 0) {
3002         errno = 0;
3003         return NULL;
3004     }
3005     if (count > IOV_MAX) {
3006         errno = EINVAL;
3007         return NULL;
3008     }
3009 
3010     vec = g_try_new0(struct iovec, count);
3011     if (vec == NULL) {
3012         errno = ENOMEM;
3013         return NULL;
3014     }
3015 
3016     target_vec = lock_user(VERIFY_READ, target_addr,
3017                            count * sizeof(struct target_iovec), 1);
3018     if (target_vec == NULL) {
3019         err = EFAULT;
3020         goto fail2;
3021     }
3022 
3023     /* ??? If host page size > target page size, this will result in a
3024        value larger than what we can actually support.  */
3025     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3026     total_len = 0;
3027 
3028     for (i = 0; i < count; i++) {
3029         abi_ulong base = tswapal(target_vec[i].iov_base);
3030         abi_long len = tswapal(target_vec[i].iov_len);
3031 
3032         if (len < 0) {
3033             err = EINVAL;
3034             goto fail;
3035         } else if (len == 0) {
3036             /* Zero length pointer is ignored.  */
3037             vec[i].iov_base = 0;
3038         } else {
3039             vec[i].iov_base = lock_user(type, base, len, copy);
3040             /* If the first buffer pointer is bad, this is a fault.  But
3041              * subsequent bad buffers will result in a partial write; this
3042              * is realized by filling the vector with null pointers and
3043              * zero lengths. */
3044             if (!vec[i].iov_base) {
3045                 if (i == 0) {
3046                     err = EFAULT;
3047                     goto fail;
3048                 } else {
3049                     bad_address = true;
3050                 }
3051             }
3052             if (bad_address) {
3053                 len = 0;
3054             }
3055             if (len > max_len - total_len) {
3056                 len = max_len - total_len;
3057             }
3058         }
3059         vec[i].iov_len = len;
3060         total_len += len;
3061     }
3062 
3063     unlock_user(target_vec, target_addr, 0);
3064     return vec;
3065 
3066  fail:
3067     while (--i >= 0) {
3068         if (tswapal(target_vec[i].iov_len) > 0) {
3069             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3070         }
3071     }
3072     unlock_user(target_vec, target_addr, 0);
3073  fail2:
3074     g_free(vec);
3075     errno = err;
3076     return NULL;
3077 }
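/*
 * Note on the contract above: lock_iovec() returns NULL both for an empty
 * vector (count == 0, with errno set to 0) and on failure (errno set to
 * EINVAL, ENOMEM or EFAULT), so callers must inspect errno to tell the two
 * cases apart.
 */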
3078 
3079 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3080                          abi_ulong count, int copy)
3081 {
3082     struct target_iovec *target_vec;
3083     int i;
3084 
3085     target_vec = lock_user(VERIFY_READ, target_addr,
3086                            count * sizeof(struct target_iovec), 1);
3087     if (target_vec) {
3088         for (i = 0; i < count; i++) {
3089             abi_ulong base = tswapal(target_vec[i].iov_base);
3090             abi_long len = tswapal(target_vec[i].iov_len);
3091             if (len < 0) {
3092                 break;
3093             }
3094             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3095         }
3096         unlock_user(target_vec, target_addr, 0);
3097     }
3098 
3099     g_free(vec);
3100 }
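/*
 * Usage sketch (hedged; mirrors how do_sendrecvmsg_locked() below drives
 * this pair, names are illustrative):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_vec, count, 1);
 *     if (vec == NULL && errno != 0) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ... pass vec/count to the host syscall ...
 *     unlock_iovec(vec, target_vec, count, 0);
 */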
3101 
3102 static inline int target_to_host_sock_type(int *type)
3103 {
3104     int host_type = 0;
3105     int target_type = *type;
3106 
3107     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3108     case TARGET_SOCK_DGRAM:
3109         host_type = SOCK_DGRAM;
3110         break;
3111     case TARGET_SOCK_STREAM:
3112         host_type = SOCK_STREAM;
3113         break;
3114     default:
3115         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3116         break;
3117     }
3118     if (target_type & TARGET_SOCK_CLOEXEC) {
3119 #if defined(SOCK_CLOEXEC)
3120         host_type |= SOCK_CLOEXEC;
3121 #else
3122         return -TARGET_EINVAL;
3123 #endif
3124     }
3125     if (target_type & TARGET_SOCK_NONBLOCK) {
3126 #if defined(SOCK_NONBLOCK)
3127         host_type |= SOCK_NONBLOCK;
3128 #elif !defined(O_NONBLOCK)
3129         return -TARGET_EINVAL;
3130 #endif
3131     }
3132     *type = host_type;
3133     return 0;
3134 }
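/*
 * Example (hedged): a guest asking for TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC
 * is mapped to SOCK_STREAM | SOCK_CLOEXEC on hosts that define SOCK_CLOEXEC,
 * and rejected with -TARGET_EINVAL otherwise.  TARGET_SOCK_NONBLOCK without a
 * host SOCK_NONBLOCK is instead emulated after socket creation via fcntl() in
 * sock_flags_fixup() below, provided O_NONBLOCK exists.
 */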
3135 
3136 /* Try to emulate socket type flags after socket creation.  */
3137 static int sock_flags_fixup(int fd, int target_type)
3138 {
3139 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3140     if (target_type & TARGET_SOCK_NONBLOCK) {
3141         int flags = fcntl(fd, F_GETFL);
3142         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3143             close(fd);
3144             return -TARGET_EINVAL;
3145         }
3146     }
3147 #endif
3148     return fd;
3149 }
3150 
3151 /* do_socket() Must return target values and target errnos. */
3152 static abi_long do_socket(int domain, int type, int protocol)
3153 {
3154     int target_type = type;
3155     int ret;
3156 
3157     ret = target_to_host_sock_type(&type);
3158     if (ret) {
3159         return ret;
3160     }
3161 
3162     if (domain == PF_NETLINK && !(
3163 #ifdef CONFIG_RTNETLINK
3164          protocol == NETLINK_ROUTE ||
3165 #endif
3166          protocol == NETLINK_KOBJECT_UEVENT ||
3167          protocol == NETLINK_AUDIT)) {
3168         return -TARGET_EPROTONOSUPPORT;
3169     }
3170 
3171     if (domain == AF_PACKET ||
3172         (domain == AF_INET && type == SOCK_PACKET)) {
3173         protocol = tswap16(protocol);
3174     }
3175 
3176     ret = get_errno(socket(domain, type, protocol));
3177     if (ret >= 0) {
3178         ret = sock_flags_fixup(ret, target_type);
3179         if (type == SOCK_PACKET) {
3180             /* Handle an obsolete case:
3181              * if the socket type is SOCK_PACKET, bind by name.
3182              */
3183             fd_trans_register(ret, &target_packet_trans);
3184         } else if (domain == PF_NETLINK) {
3185             switch (protocol) {
3186 #ifdef CONFIG_RTNETLINK
3187             case NETLINK_ROUTE:
3188                 fd_trans_register(ret, &target_netlink_route_trans);
3189                 break;
3190 #endif
3191             case NETLINK_KOBJECT_UEVENT:
3192                 /* nothing to do: messages are strings */
3193                 break;
3194             case NETLINK_AUDIT:
3195                 fd_trans_register(ret, &target_netlink_audit_trans);
3196                 break;
3197             default:
3198                 g_assert_not_reached();
3199             }
3200         }
3201     }
3202     return ret;
3203 }
3204 
3205 /* do_bind() Must return target values and target errnos. */
3206 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3207                         socklen_t addrlen)
3208 {
3209     void *addr;
3210     abi_long ret;
3211 
3212     if ((int)addrlen < 0) {
3213         return -TARGET_EINVAL;
3214     }
3215 
3216     addr = alloca(addrlen+1);
3217 
3218     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3219     if (ret)
3220         return ret;
3221 
3222     return get_errno(bind(sockfd, addr, addrlen));
3223 }
3224 
3225 /* do_connect() Must return target values and target errnos. */
3226 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3227                            socklen_t addrlen)
3228 {
3229     void *addr;
3230     abi_long ret;
3231 
3232     if ((int)addrlen < 0) {
3233         return -TARGET_EINVAL;
3234     }
3235 
3236     addr = alloca(addrlen+1);
3237 
3238     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3239     if (ret)
3240         return ret;
3241 
3242     return get_errno(safe_connect(sockfd, addr, addrlen));
3243 }
3244 
3245 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3246 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3247                                       int flags, int send)
3248 {
3249     abi_long ret, len;
3250     struct msghdr msg;
3251     abi_ulong count;
3252     struct iovec *vec;
3253     abi_ulong target_vec;
3254 
3255     if (msgp->msg_name) {
3256         msg.msg_namelen = tswap32(msgp->msg_namelen);
3257         msg.msg_name = alloca(msg.msg_namelen+1);
3258         ret = target_to_host_sockaddr(fd, msg.msg_name,
3259                                       tswapal(msgp->msg_name),
3260                                       msg.msg_namelen);
3261         if (ret == -TARGET_EFAULT) {
3262             /* For connected sockets msg_name and msg_namelen must
3263              * be ignored, so returning EFAULT immediately is wrong.
3264              * Instead, pass a bad msg_name to the host kernel, and
3265              * let it decide whether to return EFAULT or not.
3266              */
3267             msg.msg_name = (void *)-1;
3268         } else if (ret) {
3269             goto out2;
3270         }
3271     } else {
3272         msg.msg_name = NULL;
3273         msg.msg_namelen = 0;
3274     }
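    /*
     * The host control buffer is sized at twice the target's msg_controllen,
     * presumably to leave headroom for host cmsg headers and alignment being
     * larger than the target's (e.g. a 64-bit host running a 32-bit guest).
     */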
3275     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3276     msg.msg_control = alloca(msg.msg_controllen);
3277     memset(msg.msg_control, 0, msg.msg_controllen);
3278 
3279     msg.msg_flags = tswap32(msgp->msg_flags);
3280 
3281     count = tswapal(msgp->msg_iovlen);
3282     target_vec = tswapal(msgp->msg_iov);
3283 
3284     if (count > IOV_MAX) {
3285         /* sendrecvmsg returns a different errno for this condition than
3286          * readv/writev, so we must catch it here before lock_iovec() does.
3287          */
3288         ret = -TARGET_EMSGSIZE;
3289         goto out2;
3290     }
3291 
3292     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3293                      target_vec, count, send);
3294     if (vec == NULL) {
3295         ret = -host_to_target_errno(errno);
3296         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3297         if (!send || ret) {
3298             goto out2;
3299         }
3300     }
3301     msg.msg_iovlen = count;
3302     msg.msg_iov = vec;
3303 
3304     if (send) {
3305         if (fd_trans_target_to_host_data(fd)) {
3306             void *host_msg;
3307 
3308             host_msg = g_malloc(msg.msg_iov->iov_len);
3309             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3310             ret = fd_trans_target_to_host_data(fd)(host_msg,
3311                                                    msg.msg_iov->iov_len);
3312             if (ret >= 0) {
3313                 msg.msg_iov->iov_base = host_msg;
3314                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3315             }
3316             g_free(host_msg);
3317         } else {
3318             ret = target_to_host_cmsg(&msg, msgp);
3319             if (ret == 0) {
3320                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3321             }
3322         }
3323     } else {
3324         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3325         if (!is_error(ret)) {
3326             len = ret;
3327             if (fd_trans_host_to_target_data(fd)) {
3328                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3329                                                MIN(msg.msg_iov->iov_len, len));
3330             }
3331             if (!is_error(ret)) {
3332                 ret = host_to_target_cmsg(msgp, &msg);
3333             }
3334             if (!is_error(ret)) {
3335                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3336                 msgp->msg_flags = tswap32(msg.msg_flags);
3337                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3338                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3339                                     msg.msg_name, msg.msg_namelen);
3340                     if (ret) {
3341                         goto out;
3342                     }
3343                 }
3344 
3345                 ret = len;
3346             }
3347         }
3348     }
3349 
3350 out:
3351     if (vec) {
3352         unlock_iovec(vec, target_vec, count, !send);
3353     }
3354 out2:
3355     return ret;
3356 }
3357 
3358 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3359                                int flags, int send)
3360 {
3361     abi_long ret;
3362     struct target_msghdr *msgp;
3363 
3364     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3365                           msgp,
3366                           target_msg,
3367                           send ? 1 : 0)) {
3368         return -TARGET_EFAULT;
3369     }
3370     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3371     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3372     return ret;
3373 }
3374 
3375 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3376  * so it might not have this *mmsg-specific flag either.
3377  */
3378 #ifndef MSG_WAITFORONE
3379 #define MSG_WAITFORONE 0x10000
3380 #endif
3381 
3382 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3383                                 unsigned int vlen, unsigned int flags,
3384                                 int send)
3385 {
3386     struct target_mmsghdr *mmsgp;
3387     abi_long ret = 0;
3388     int i;
3389 
3390     if (vlen > UIO_MAXIOV) {
3391         vlen = UIO_MAXIOV;
3392     }
3393 
3394     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3395     if (!mmsgp) {
3396         return -TARGET_EFAULT;
3397     }
3398 
3399     for (i = 0; i < vlen; i++) {
3400         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3401         if (is_error(ret)) {
3402             break;
3403         }
3404         mmsgp[i].msg_len = tswap32(ret);
3405         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3406         if (flags & MSG_WAITFORONE) {
3407             flags |= MSG_DONTWAIT;
3408         }
3409     }
3410 
3411     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3412 
3413     /* Return number of datagrams sent if we sent any at all;
3414      * otherwise return the error.
3415      */
3416     if (i) {
3417         return i;
3418     }
3419     return ret;
3420 }
3421 
3422 /* do_accept4() Must return target values and target errnos. */
3423 static abi_long do_accept4(int fd, abi_ulong target_addr,
3424                            abi_ulong target_addrlen_addr, int flags)
3425 {
3426     socklen_t addrlen, ret_addrlen;
3427     void *addr;
3428     abi_long ret;
3429     int host_flags;
3430 
3431     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3432         return -TARGET_EINVAL;
3433     }
3434 
3435     host_flags = 0;
3436     if (flags & TARGET_SOCK_NONBLOCK) {
3437         host_flags |= SOCK_NONBLOCK;
3438     }
3439     if (flags & TARGET_SOCK_CLOEXEC) {
3440         host_flags |= SOCK_CLOEXEC;
3441     }
3442 
3443     if (target_addr == 0) {
3444         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3445     }
3446 
3447     /* linux returns EFAULT if addrlen pointer is invalid */
3448     if (get_user_u32(addrlen, target_addrlen_addr))
3449         return -TARGET_EFAULT;
3450 
3451     if ((int)addrlen < 0) {
3452         return -TARGET_EINVAL;
3453     }
3454 
3455     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3456         return -TARGET_EFAULT;
3457     }
3458 
3459     addr = alloca(addrlen);
3460 
3461     ret_addrlen = addrlen;
3462     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3463     if (!is_error(ret)) {
3464         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3465         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3466             ret = -TARGET_EFAULT;
3467         }
3468     }
3469     return ret;
3470 }
3471 
3472 /* do_getpeername() Must return target values and target errnos. */
3473 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3474                                abi_ulong target_addrlen_addr)
3475 {
3476     socklen_t addrlen, ret_addrlen;
3477     void *addr;
3478     abi_long ret;
3479 
3480     if (get_user_u32(addrlen, target_addrlen_addr))
3481         return -TARGET_EFAULT;
3482 
3483     if ((int)addrlen < 0) {
3484         return -TARGET_EINVAL;
3485     }
3486 
3487     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3488         return -TARGET_EFAULT;
3489     }
3490 
3491     addr = alloca(addrlen);
3492 
3493     ret_addrlen = addrlen;
3494     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3495     if (!is_error(ret)) {
3496         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3497         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3498             ret = -TARGET_EFAULT;
3499         }
3500     }
3501     return ret;
3502 }
3503 
3504 /* do_getsockname() Must return target values and target errnos. */
3505 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3506                                abi_ulong target_addrlen_addr)
3507 {
3508     socklen_t addrlen, ret_addrlen;
3509     void *addr;
3510     abi_long ret;
3511 
3512     if (get_user_u32(addrlen, target_addrlen_addr))
3513         return -TARGET_EFAULT;
3514 
3515     if ((int)addrlen < 0) {
3516         return -TARGET_EINVAL;
3517     }
3518 
3519     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3520         return -TARGET_EFAULT;
3521     }
3522 
3523     addr = alloca(addrlen);
3524 
3525     ret_addrlen = addrlen;
3526     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3527     if (!is_error(ret)) {
3528         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3529         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3530             ret = -TARGET_EFAULT;
3531         }
3532     }
3533     return ret;
3534 }
3535 
3536 /* do_socketpair() Must return target values and target errnos. */
3537 static abi_long do_socketpair(int domain, int type, int protocol,
3538                               abi_ulong target_tab_addr)
3539 {
3540     int tab[2];
3541     abi_long ret;
3542 
3543     target_to_host_sock_type(&type);
3544 
3545     ret = get_errno(socketpair(domain, type, protocol, tab));
3546     if (!is_error(ret)) {
3547         if (put_user_s32(tab[0], target_tab_addr)
3548             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3549             ret = -TARGET_EFAULT;
3550     }
3551     return ret;
3552 }
3553 
3554 /* do_sendto() Must return target values and target errnos. */
3555 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3556                           abi_ulong target_addr, socklen_t addrlen)
3557 {
3558     void *addr;
3559     void *host_msg;
3560     void *copy_msg = NULL;
3561     abi_long ret;
3562 
3563     if ((int)addrlen < 0) {
3564         return -TARGET_EINVAL;
3565     }
3566 
3567     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3568     if (!host_msg)
3569         return -TARGET_EFAULT;
3570     if (fd_trans_target_to_host_data(fd)) {
3571         copy_msg = host_msg;
3572         host_msg = g_malloc(len);
3573         memcpy(host_msg, copy_msg, len);
3574         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3575         if (ret < 0) {
3576             goto fail;
3577         }
3578     }
3579     if (target_addr) {
3580         addr = alloca(addrlen+1);
3581         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3582         if (ret) {
3583             goto fail;
3584         }
3585         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3586     } else {
3587         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3588     }
3589 fail:
3590     if (copy_msg) {
3591         g_free(host_msg);
3592         host_msg = copy_msg;
3593     }
3594     unlock_user(host_msg, msg, 0);
3595     return ret;
3596 }
3597 
3598 /* do_recvfrom() Must return target values and target errnos. */
3599 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3600                             abi_ulong target_addr,
3601                             abi_ulong target_addrlen)
3602 {
3603     socklen_t addrlen, ret_addrlen;
3604     void *addr;
3605     void *host_msg;
3606     abi_long ret;
3607 
3608     if (!msg) {
3609         host_msg = NULL;
3610     } else {
3611         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3612         if (!host_msg) {
3613             return -TARGET_EFAULT;
3614         }
3615     }
3616     if (target_addr) {
3617         if (get_user_u32(addrlen, target_addrlen)) {
3618             ret = -TARGET_EFAULT;
3619             goto fail;
3620         }
3621         if ((int)addrlen < 0) {
3622             ret = -TARGET_EINVAL;
3623             goto fail;
3624         }
3625         addr = alloca(addrlen);
3626         ret_addrlen = addrlen;
3627         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3628                                       addr, &ret_addrlen));
3629     } else {
3630         addr = NULL; /* To keep compiler quiet.  */
3631         addrlen = 0; /* To keep compiler quiet.  */
3632         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3633     }
3634     if (!is_error(ret)) {
3635         if (fd_trans_host_to_target_data(fd)) {
3636             abi_long trans;
3637             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3638             if (is_error(trans)) {
3639                 ret = trans;
3640                 goto fail;
3641             }
3642         }
3643         if (target_addr) {
3644             host_to_target_sockaddr(target_addr, addr,
3645                                     MIN(addrlen, ret_addrlen));
3646             if (put_user_u32(ret_addrlen, target_addrlen)) {
3647                 ret = -TARGET_EFAULT;
3648                 goto fail;
3649             }
3650         }
3651         unlock_user(host_msg, msg, len);
3652     } else {
3653 fail:
3654         unlock_user(host_msg, msg, 0);
3655     }
3656     return ret;
3657 }
3658 
3659 #ifdef TARGET_NR_socketcall
3660 /* do_socketcall() must return target values and target errnos. */
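/*
 * Dispatch sketch (illustrative): a guest sendto(2) issued through
 * socketcall(TARGET_SYS_SENDTO, vptr) has six entries in the nargs[] table
 * below, so six abi_longs are fetched from vptr and the call is forwarded
 * as do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]).
 */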
3661 static abi_long do_socketcall(int num, abi_ulong vptr)
3662 {
3663     static const unsigned nargs[] = { /* number of arguments per operation */
3664         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3665         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3666         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3667         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3668         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3669         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3670         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3671         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3672         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3673         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3674         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3675         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3676         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3677         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3678         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3679         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3680         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3681         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3682         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3683         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3684     };
3685     abi_long a[6]; /* max 6 args */
3686     unsigned i;
3687 
3688     /* check the range of the first argument num */
3689     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3690     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3691         return -TARGET_EINVAL;
3692     }
3693     /* ensure we have space for args */
3694     if (nargs[num] > ARRAY_SIZE(a)) {
3695         return -TARGET_EINVAL;
3696     }
3697     /* collect the arguments in a[] according to nargs[] */
3698     for (i = 0; i < nargs[num]; ++i) {
3699         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3700             return -TARGET_EFAULT;
3701         }
3702     }
3703     /* now when we have the args, invoke the appropriate underlying function */
3704     switch (num) {
3705     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3706         return do_socket(a[0], a[1], a[2]);
3707     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3708         return do_bind(a[0], a[1], a[2]);
3709     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3710         return do_connect(a[0], a[1], a[2]);
3711     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3712         return get_errno(listen(a[0], a[1]));
3713     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3714         return do_accept4(a[0], a[1], a[2], 0);
3715     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3716         return do_getsockname(a[0], a[1], a[2]);
3717     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3718         return do_getpeername(a[0], a[1], a[2]);
3719     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3720         return do_socketpair(a[0], a[1], a[2], a[3]);
3721     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3722         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3723     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3724         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3725     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3726         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3727     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3728         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3729     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3730         return get_errno(shutdown(a[0], a[1]));
3731     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3732         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3733     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3734         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3735     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3736         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3737     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3738         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3739     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3740         return do_accept4(a[0], a[1], a[2], a[3]);
3741     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3742         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3743     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3744         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3745     default:
3746         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3747         return -TARGET_EINVAL;
3748     }
3749 }
3750 #endif
3751 
3752 #ifndef TARGET_SEMID64_DS
3753 /* asm-generic version of this struct */
3754 struct target_semid64_ds
3755 {
3756   struct target_ipc_perm sem_perm;
3757   abi_ulong sem_otime;
3758 #if TARGET_ABI_BITS == 32
3759   abi_ulong __unused1;
3760 #endif
3761   abi_ulong sem_ctime;
3762 #if TARGET_ABI_BITS == 32
3763   abi_ulong __unused2;
3764 #endif
3765   abi_ulong sem_nsems;
3766   abi_ulong __unused3;
3767   abi_ulong __unused4;
3768 };
3769 #endif
3770 
3771 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3772                                                abi_ulong target_addr)
3773 {
3774     struct target_ipc_perm *target_ip;
3775     struct target_semid64_ds *target_sd;
3776 
3777     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3778         return -TARGET_EFAULT;
3779     target_ip = &(target_sd->sem_perm);
3780     host_ip->__key = tswap32(target_ip->__key);
3781     host_ip->uid = tswap32(target_ip->uid);
3782     host_ip->gid = tswap32(target_ip->gid);
3783     host_ip->cuid = tswap32(target_ip->cuid);
3784     host_ip->cgid = tswap32(target_ip->cgid);
3785 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3786     host_ip->mode = tswap32(target_ip->mode);
3787 #else
3788     host_ip->mode = tswap16(target_ip->mode);
3789 #endif
3790 #if defined(TARGET_PPC)
3791     host_ip->__seq = tswap32(target_ip->__seq);
3792 #else
3793     host_ip->__seq = tswap16(target_ip->__seq);
3794 #endif
3795     unlock_user_struct(target_sd, target_addr, 0);
3796     return 0;
3797 }
3798 
3799 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3800                                                struct ipc_perm *host_ip)
3801 {
3802     struct target_ipc_perm *target_ip;
3803     struct target_semid64_ds *target_sd;
3804 
3805     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3806         return -TARGET_EFAULT;
3807     target_ip = &(target_sd->sem_perm);
3808     target_ip->__key = tswap32(host_ip->__key);
3809     target_ip->uid = tswap32(host_ip->uid);
3810     target_ip->gid = tswap32(host_ip->gid);
3811     target_ip->cuid = tswap32(host_ip->cuid);
3812     target_ip->cgid = tswap32(host_ip->cgid);
3813 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3814     target_ip->mode = tswap32(host_ip->mode);
3815 #else
3816     target_ip->mode = tswap16(host_ip->mode);
3817 #endif
3818 #if defined(TARGET_PPC)
3819     target_ip->__seq = tswap32(host_ip->__seq);
3820 #else
3821     target_ip->__seq = tswap16(host_ip->__seq);
3822 #endif
3823     unlock_user_struct(target_sd, target_addr, 1);
3824     return 0;
3825 }
3826 
3827 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3828                                                abi_ulong target_addr)
3829 {
3830     struct target_semid64_ds *target_sd;
3831 
3832     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3833         return -TARGET_EFAULT;
3834     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3835         return -TARGET_EFAULT;
3836     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3837     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3838     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3839     unlock_user_struct(target_sd, target_addr, 0);
3840     return 0;
3841 }
3842 
3843 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3844                                                struct semid_ds *host_sd)
3845 {
3846     struct target_semid64_ds *target_sd;
3847 
3848     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3849         return -TARGET_EFAULT;
3850     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3851         return -TARGET_EFAULT;
3852     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3853     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3854     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3855     unlock_user_struct(target_sd, target_addr, 1);
3856     return 0;
3857 }
3858 
3859 struct target_seminfo {
3860     int semmap;
3861     int semmni;
3862     int semmns;
3863     int semmnu;
3864     int semmsl;
3865     int semopm;
3866     int semume;
3867     int semusz;
3868     int semvmx;
3869     int semaem;
3870 };
3871 
3872 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3873                                               struct seminfo *host_seminfo)
3874 {
3875     struct target_seminfo *target_seminfo;
3876     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3877         return -TARGET_EFAULT;
3878     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3879     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3880     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3881     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3882     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3883     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3884     __put_user(host_seminfo->semume, &target_seminfo->semume);
3885     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3886     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3887     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3888     unlock_user_struct(target_seminfo, target_addr, 1);
3889     return 0;
3890 }
3891 
3892 union semun {
3893 	int val;
3894 	struct semid_ds *buf;
3895 	unsigned short *array;
3896 	struct seminfo *__buf;
3897 };
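/*
 * Note: as with the host semctl(2) interface, union semun is not provided by
 * the C library and must be declared here; union target_semun below mirrors
 * it with guest-side (abi_ulong) representations of the pointer members.
 */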
3898 
3899 union target_semun {
3900 	int val;
3901 	abi_ulong buf;
3902 	abi_ulong array;
3903 	abi_ulong __buf;
3904 };
3905 
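/*
 * Both semarray helpers below first query the kernel with semctl(IPC_STAT)
 * to learn sem_nsems, since GETALL/SETALL carry no explicit array length of
 * their own.
 */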
3906 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3907                                                abi_ulong target_addr)
3908 {
3909     int nsems;
3910     unsigned short *array;
3911     union semun semun;
3912     struct semid_ds semid_ds;
3913     int i, ret;
3914 
3915     semun.buf = &semid_ds;
3916 
3917     ret = semctl(semid, 0, IPC_STAT, semun);
3918     if (ret == -1)
3919         return get_errno(ret);
3920 
3921     nsems = semid_ds.sem_nsems;
3922 
3923     *host_array = g_try_new(unsigned short, nsems);
3924     if (!*host_array) {
3925         return -TARGET_ENOMEM;
3926     }
3927     array = lock_user(VERIFY_READ, target_addr,
3928                       nsems*sizeof(unsigned short), 1);
3929     if (!array) {
3930         g_free(*host_array);
3931         return -TARGET_EFAULT;
3932     }
3933 
3934     for(i=0; i<nsems; i++) {
3935         __get_user((*host_array)[i], &array[i]);
3936     }
3937     unlock_user(array, target_addr, 0);
3938 
3939     return 0;
3940 }
3941 
3942 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3943                                                unsigned short **host_array)
3944 {
3945     int nsems;
3946     unsigned short *array;
3947     union semun semun;
3948     struct semid_ds semid_ds;
3949     int i, ret;
3950 
3951     semun.buf = &semid_ds;
3952 
3953     ret = semctl(semid, 0, IPC_STAT, semun);
3954     if (ret == -1)
3955         return get_errno(ret);
3956 
3957     nsems = semid_ds.sem_nsems;
3958 
3959     array = lock_user(VERIFY_WRITE, target_addr,
3960                       nsems*sizeof(unsigned short), 0);
3961     if (!array)
3962         return -TARGET_EFAULT;
3963 
3964     for(i=0; i<nsems; i++) {
3965         __put_user((*host_array)[i], &array[i]);
3966     }
3967     g_free(*host_array);
3968     unlock_user(array, target_addr, 1);
3969 
3970     return 0;
3971 }
3972 
3973 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3974                                  abi_ulong target_arg)
3975 {
3976     union target_semun target_su = { .buf = target_arg };
3977     union semun arg;
3978     struct semid_ds dsarg;
3979     unsigned short *array = NULL;
3980     struct seminfo seminfo;
3981     abi_long ret = -TARGET_EINVAL;
3982     abi_long err;
3983     cmd &= 0xff;
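    /*
     * Keep only the low command byte: guests may OR version bits such as
     * IPC_64 (0x0100 on Linux) into cmd, which the switch below does not
     * need to see.
     */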
3984 
3985     switch( cmd ) {
3986 	case GETVAL:
3987 	case SETVAL:
3988             /* In 64 bit cross-endian situations, we will erroneously pick up
3989              * the wrong half of the union for the "val" element.  To rectify
3990              * this, the entire 8-byte structure is byteswapped, followed by
3991 	     * a swap of the 4 byte val field. In other cases, the data is
3992 	     * already in proper host byte order. */
3993 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3994 		target_su.buf = tswapal(target_su.buf);
3995 		arg.val = tswap32(target_su.val);
3996 	    } else {
3997 		arg.val = target_su.val;
3998 	    }
3999             ret = get_errno(semctl(semid, semnum, cmd, arg));
4000             break;
4001 	case GETALL:
4002 	case SETALL:
4003             err = target_to_host_semarray(semid, &array, target_su.array);
4004             if (err)
4005                 return err;
4006             arg.array = array;
4007             ret = get_errno(semctl(semid, semnum, cmd, arg));
4008             err = host_to_target_semarray(semid, target_su.array, &array);
4009             if (err)
4010                 return err;
4011             break;
4012 	case IPC_STAT:
4013 	case IPC_SET:
4014 	case SEM_STAT:
4015             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4016             if (err)
4017                 return err;
4018             arg.buf = &dsarg;
4019             ret = get_errno(semctl(semid, semnum, cmd, arg));
4020             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4021             if (err)
4022                 return err;
4023             break;
4024 	case IPC_INFO:
4025 	case SEM_INFO:
4026             arg.__buf = &seminfo;
4027             ret = get_errno(semctl(semid, semnum, cmd, arg));
4028             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4029             if (err)
4030                 return err;
4031             break;
4032 	case IPC_RMID:
4033 	case GETPID:
4034 	case GETNCNT:
4035 	case GETZCNT:
4036             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4037             break;
4038     }
4039 
4040     return ret;
4041 }
4042 
4043 struct target_sembuf {
4044     unsigned short sem_num;
4045     short sem_op;
4046     short sem_flg;
4047 };
4048 
4049 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4050                                              abi_ulong target_addr,
4051                                              unsigned nsops)
4052 {
4053     struct target_sembuf *target_sembuf;
4054     int i;
4055 
4056     target_sembuf = lock_user(VERIFY_READ, target_addr,
4057                               nsops*sizeof(struct target_sembuf), 1);
4058     if (!target_sembuf)
4059         return -TARGET_EFAULT;
4060 
4061     for(i=0; i<nsops; i++) {
4062         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4063         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4064         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4065     }
4066 
4067     unlock_user(target_sembuf, target_addr, 0);
4068 
4069     return 0;
4070 }
4071 
4072 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4073     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4074 
4075 /*
4076  * This macro is required to handle the s390 variant, which passes the
4077  * arguments in a different order than the default.
4078  */
4079 #ifdef __s390x__
4080 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4081   (__nsops), (__timeout), (__sops)
4082 #else
4083 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4084   (__nsops), 0, (__sops), (__timeout)
4085 #endif
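/*
 * Expansion example (illustrative values): with nsops = 2, sops = p and
 * timeout = t, safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(2, p, t))
 * becomes safe_ipc(IPCOP_semtimedop, semid, 2, t, p) on s390x and
 * safe_ipc(IPCOP_semtimedop, semid, 2, 0, p, t) everywhere else.
 */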
4086 
4087 static inline abi_long do_semtimedop(int semid,
4088                                      abi_long ptr,
4089                                      unsigned nsops,
4090                                      abi_long timeout, bool time64)
4091 {
4092     struct sembuf *sops;
4093     struct timespec ts, *pts = NULL;
4094     abi_long ret;
4095 
4096     if (timeout) {
4097         pts = &ts;
4098         if (time64) {
4099             if (target_to_host_timespec64(pts, timeout)) {
4100                 return -TARGET_EFAULT;
4101             }
4102         } else {
4103             if (target_to_host_timespec(pts, timeout)) {
4104                 return -TARGET_EFAULT;
4105             }
4106         }
4107     }
4108 
4109     if (nsops > TARGET_SEMOPM) {
4110         return -TARGET_E2BIG;
4111     }
4112 
4113     sops = g_new(struct sembuf, nsops);
4114 
4115     if (target_to_host_sembuf(sops, ptr, nsops)) {
4116         g_free(sops);
4117         return -TARGET_EFAULT;
4118     }
4119 
4120     ret = -TARGET_ENOSYS;
4121 #ifdef __NR_semtimedop
4122     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4123 #endif
4124 #ifdef __NR_ipc
4125     if (ret == -TARGET_ENOSYS) {
4126         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4127                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4128     }
4129 #endif
4130     g_free(sops);
4131     return ret;
4132 }
4133 #endif
4134 
4135 struct target_msqid_ds
4136 {
4137     struct target_ipc_perm msg_perm;
4138     abi_ulong msg_stime;
4139 #if TARGET_ABI_BITS == 32
4140     abi_ulong __unused1;
4141 #endif
4142     abi_ulong msg_rtime;
4143 #if TARGET_ABI_BITS == 32
4144     abi_ulong __unused2;
4145 #endif
4146     abi_ulong msg_ctime;
4147 #if TARGET_ABI_BITS == 32
4148     abi_ulong __unused3;
4149 #endif
4150     abi_ulong __msg_cbytes;
4151     abi_ulong msg_qnum;
4152     abi_ulong msg_qbytes;
4153     abi_ulong msg_lspid;
4154     abi_ulong msg_lrpid;
4155     abi_ulong __unused4;
4156     abi_ulong __unused5;
4157 };
4158 
4159 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4160                                                abi_ulong target_addr)
4161 {
4162     struct target_msqid_ds *target_md;
4163 
4164     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4165         return -TARGET_EFAULT;
4166     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4167         return -TARGET_EFAULT;
4168     host_md->msg_stime = tswapal(target_md->msg_stime);
4169     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4170     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4171     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4172     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4173     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4174     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4175     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4176     unlock_user_struct(target_md, target_addr, 0);
4177     return 0;
4178 }
4179 
4180 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4181                                                struct msqid_ds *host_md)
4182 {
4183     struct target_msqid_ds *target_md;
4184 
4185     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4186         return -TARGET_EFAULT;
4187     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4188         return -TARGET_EFAULT;
4189     target_md->msg_stime = tswapal(host_md->msg_stime);
4190     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4191     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4192     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4193     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4194     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4195     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4196     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4197     unlock_user_struct(target_md, target_addr, 1);
4198     return 0;
4199 }
4200 
4201 struct target_msginfo {
4202     int msgpool;
4203     int msgmap;
4204     int msgmax;
4205     int msgmnb;
4206     int msgmni;
4207     int msgssz;
4208     int msgtql;
4209     unsigned short int msgseg;
4210 };
4211 
4212 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4213                                               struct msginfo *host_msginfo)
4214 {
4215     struct target_msginfo *target_msginfo;
4216     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4217         return -TARGET_EFAULT;
4218     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4219     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4220     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4221     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4222     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4223     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4224     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4225     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4226     unlock_user_struct(target_msginfo, target_addr, 1);
4227     return 0;
4228 }
4229 
4230 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4231 {
4232     struct msqid_ds dsarg;
4233     struct msginfo msginfo;
4234     abi_long ret = -TARGET_EINVAL;
4235 
4236     cmd &= 0xff;
4237 
4238     switch (cmd) {
4239     case IPC_STAT:
4240     case IPC_SET:
4241     case MSG_STAT:
4242         if (target_to_host_msqid_ds(&dsarg,ptr))
4243             return -TARGET_EFAULT;
4244         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4245         if (host_to_target_msqid_ds(ptr,&dsarg))
4246             return -TARGET_EFAULT;
4247         break;
4248     case IPC_RMID:
4249         ret = get_errno(msgctl(msgid, cmd, NULL));
4250         break;
4251     case IPC_INFO:
4252     case MSG_INFO:
4253         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4254         if (host_to_target_msginfo(ptr, &msginfo))
4255             return -TARGET_EFAULT;
4256         break;
4257     }
4258 
4259     return ret;
4260 }
4261 
4262 struct target_msgbuf {
4263     abi_long mtype;
4264     char	mtext[1];
4265 };
4266 
4267 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4268                                  ssize_t msgsz, int msgflg)
4269 {
4270     struct target_msgbuf *target_mb;
4271     struct msgbuf *host_mb;
4272     abi_long ret = 0;
4273 
4274     if (msgsz < 0) {
4275         return -TARGET_EINVAL;
4276     }
4277 
4278     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4279         return -TARGET_EFAULT;
4280     host_mb = g_try_malloc(msgsz + sizeof(long));
4281     if (!host_mb) {
4282         unlock_user_struct(target_mb, msgp, 0);
4283         return -TARGET_ENOMEM;
4284     }
4285     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4286     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4287     ret = -TARGET_ENOSYS;
4288 #ifdef __NR_msgsnd
4289     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4290 #endif
4291 #ifdef __NR_ipc
4292     if (ret == -TARGET_ENOSYS) {
4293 #ifdef __s390x__
4294         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4295                                  host_mb));
4296 #else
4297         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4298                                  host_mb, 0));
4299 #endif
4300     }
4301 #endif
4302     g_free(host_mb);
4303     unlock_user_struct(target_mb, msgp, 0);
4304 
4305     return ret;
4306 }
4307 
4308 #ifdef __NR_ipc
4309 #if defined(__sparc__)
4310 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments.  */
4311 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4312 #elif defined(__s390x__)
4313 /* The s390 sys_ipc variant has only five parameters.  */
4314 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4315     ((long int[]){(long int)__msgp, __msgtyp})
4316 #else
4317 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4318     ((long int[]){(long int)__msgp, __msgtyp}), 0
4319 #endif
4320 #endif
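/*
 * MSGRCV_ARGS() packs msgp and msgtyp the way the host sys_ipc call expects:
 * SPARC passes them directly, while the other variants wrap them in a
 * two-element long array (with a trailing 0 argument on the generic variant).
 */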
4321 
4322 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4323                                  ssize_t msgsz, abi_long msgtyp,
4324                                  int msgflg)
4325 {
4326     struct target_msgbuf *target_mb;
4327     char *target_mtext;
4328     struct msgbuf *host_mb;
4329     abi_long ret = 0;
4330 
4331     if (msgsz < 0) {
4332         return -TARGET_EINVAL;
4333     }
4334 
4335     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4336         return -TARGET_EFAULT;
4337 
4338     host_mb = g_try_malloc(msgsz + sizeof(long));
4339     if (!host_mb) {
4340         ret = -TARGET_ENOMEM;
4341         goto end;
4342     }
4343     ret = -TARGET_ENOSYS;
4344 #ifdef __NR_msgrcv
4345     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4346 #endif
4347 #ifdef __NR_ipc
4348     if (ret == -TARGET_ENOSYS) {
4349         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4350                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4351     }
4352 #endif
4353 
4354     if (ret > 0) {
4355         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4356         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4357         if (!target_mtext) {
4358             ret = -TARGET_EFAULT;
4359             goto end;
4360         }
4361         memcpy(target_mb->mtext, host_mb->mtext, ret);
4362         unlock_user(target_mtext, target_mtext_addr, ret);
4363     }
4364 
4365     target_mb->mtype = tswapal(host_mb->mtype);
4366 
4367 end:
4368     if (target_mb)
4369         unlock_user_struct(target_mb, msgp, 1);
4370     g_free(host_mb);
4371     return ret;
4372 }
4373 
4374 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4375                                                abi_ulong target_addr)
4376 {
4377     struct target_shmid_ds *target_sd;
4378 
4379     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4380         return -TARGET_EFAULT;
4381     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4382         return -TARGET_EFAULT;
4383     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4384     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4385     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4386     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4387     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4388     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4389     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4390     unlock_user_struct(target_sd, target_addr, 0);
4391     return 0;
4392 }
4393 
4394 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4395                                                struct shmid_ds *host_sd)
4396 {
4397     struct target_shmid_ds *target_sd;
4398 
4399     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4400         return -TARGET_EFAULT;
4401     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4402         return -TARGET_EFAULT;
4403     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4404     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4405     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4406     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4407     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4408     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4409     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4410     unlock_user_struct(target_sd, target_addr, 1);
4411     return 0;
4412 }
4413 
4414 struct  target_shminfo {
4415     abi_ulong shmmax;
4416     abi_ulong shmmin;
4417     abi_ulong shmmni;
4418     abi_ulong shmseg;
4419     abi_ulong shmall;
4420 };
4421 
4422 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4423                                               struct shminfo *host_shminfo)
4424 {
4425     struct target_shminfo *target_shminfo;
4426     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4427         return -TARGET_EFAULT;
4428     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4429     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4430     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4431     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4432     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4433     unlock_user_struct(target_shminfo, target_addr, 1);
4434     return 0;
4435 }
4436 
4437 struct target_shm_info {
4438     int used_ids;
4439     abi_ulong shm_tot;
4440     abi_ulong shm_rss;
4441     abi_ulong shm_swp;
4442     abi_ulong swap_attempts;
4443     abi_ulong swap_successes;
4444 };
4445 
4446 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4447                                                struct shm_info *host_shm_info)
4448 {
4449     struct target_shm_info *target_shm_info;
4450     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4451         return -TARGET_EFAULT;
4452     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4453     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4454     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4455     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4456     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4457     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4458     unlock_user_struct(target_shm_info, target_addr, 1);
4459     return 0;
4460 }
4461 
4462 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4463 {
4464     struct shmid_ds dsarg;
4465     struct shminfo shminfo;
4466     struct shm_info shm_info;
4467     abi_long ret = -TARGET_EINVAL;
4468 
4469     cmd &= 0xff;
4470 
4471     switch(cmd) {
4472     case IPC_STAT:
4473     case IPC_SET:
4474     case SHM_STAT:
4475         if (target_to_host_shmid_ds(&dsarg, buf))
4476             return -TARGET_EFAULT;
4477         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4478         if (host_to_target_shmid_ds(buf, &dsarg))
4479             return -TARGET_EFAULT;
4480         break;
4481     case IPC_INFO:
4482         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4483         if (host_to_target_shminfo(buf, &shminfo))
4484             return -TARGET_EFAULT;
4485         break;
4486     case SHM_INFO:
4487         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4488         if (host_to_target_shm_info(buf, &shm_info))
4489             return -TARGET_EFAULT;
4490         break;
4491     case IPC_RMID:
4492     case SHM_LOCK:
4493     case SHM_UNLOCK:
4494         ret = get_errno(shmctl(shmid, cmd, NULL));
4495         break;
4496     }
4497 
4498     return ret;
4499 }
4500 
4501 #ifdef TARGET_NR_ipc
4502 /* ??? This only works with linear mappings.  */
4503 /* do_ipc() must return target values and target errnos. */
4504 static abi_long do_ipc(CPUArchState *cpu_env,
4505                        unsigned int call, abi_long first,
4506                        abi_long second, abi_long third,
4507                        abi_long ptr, abi_long fifth)
4508 {
4509     int version;
4510     abi_long ret = 0;
4511 
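    /*
     * The ipc() multiplexer encodes an interface version in the upper
     * 16 bits of 'call' and the IPCOP_* operation in the lower 16 bits;
     * the version only matters for the msgrcv and shmat cases below.
     */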
4512     version = call >> 16;
4513     call &= 0xffff;
4514 
4515     switch (call) {
4516     case IPCOP_semop:
4517         ret = do_semtimedop(first, ptr, second, 0, false);
4518         break;
4519     case IPCOP_semtimedop:
4520     /*
4521      * The s390 sys_ipc variant has only five parameters instead of six
4522      * (as in the default variant); the only difference is the handling of
4523      * SEMTIMEDOP, where s390 passes the pointer to the struct timespec in
4524      * the third parameter while the generic variant uses the fifth.
4525      */
4526 #if defined(TARGET_S390X)
4527         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4528 #else
4529         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4530 #endif
4531         break;
4532 
4533     case IPCOP_semget:
4534         ret = get_errno(semget(first, second, third));
4535         break;
4536 
4537     case IPCOP_semctl: {
4538         /* The semun argument to semctl is passed by value, so dereference the
4539          * ptr argument. */
4540         abi_ulong atptr;
4541         get_user_ual(atptr, ptr);
4542         ret = do_semctl(first, second, third, atptr);
4543         break;
4544     }
4545 
4546     case IPCOP_msgget:
4547         ret = get_errno(msgget(first, second));
4548         break;
4549 
4550     case IPCOP_msgsnd:
4551         ret = do_msgsnd(first, ptr, second, third);
4552         break;
4553 
4554     case IPCOP_msgctl:
4555         ret = do_msgctl(first, second, ptr);
4556         break;
4557 
4558     case IPCOP_msgrcv:
4559         switch (version) {
4560         case 0:
4561             {
4562                 struct target_ipc_kludge {
4563                     abi_long msgp;
4564                     abi_long msgtyp;
4565                 } *tmp;
4566 
4567                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4568                     ret = -TARGET_EFAULT;
4569                     break;
4570                 }
4571 
4572                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4573 
4574                 unlock_user_struct(tmp, ptr, 0);
4575                 break;
4576             }
4577         default:
4578             ret = do_msgrcv(first, ptr, second, fifth, third);
4579         }
4580         break;
4581 
4582     case IPCOP_shmat:
4583         switch (version) {
4584         default:
4585         {
4586             abi_ulong raddr;
4587             raddr = target_shmat(cpu_env, first, ptr, second);
4588             if (is_error(raddr))
4589                 return get_errno(raddr);
4590             if (put_user_ual(raddr, third))
4591                 return -TARGET_EFAULT;
4592             break;
4593         }
4594         case 1:
4595             ret = -TARGET_EINVAL;
4596             break;
4597         }
4598         break;
4599     case IPCOP_shmdt:
4600         ret = target_shmdt(ptr);
4601         break;
4602 
4603     case IPCOP_shmget:
4604         /* IPC_* flag values are the same on all Linux platforms */
4605         ret = get_errno(shmget(first, second, third));
4606         break;
4607 
4608         /* IPC_* and SHM_* command values are the same on all Linux platforms */
4609     case IPCOP_shmctl:
4610         ret = do_shmctl(first, second, ptr);
4611         break;
4612     default:
4613         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4614                       call, version);
4615         ret = -TARGET_ENOSYS;
4616         break;
4617     }
4618     return ret;
4619 }
4620 #endif
4621 
4622 /* kernel structure types definitions */
4623 
4624 #define STRUCT(name, ...) STRUCT_ ## name,
4625 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4626 enum {
4627 #include "syscall_types.h"
4628 STRUCT_MAX
4629 };
4630 #undef STRUCT
4631 #undef STRUCT_SPECIAL
4632 
4633 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4634 #define STRUCT_SPECIAL(name)
4635 #include "syscall_types.h"
4636 #undef STRUCT
4637 #undef STRUCT_SPECIAL
4638 
4639 #define MAX_STRUCT_SIZE 4096
4640 
4641 #ifdef CONFIG_FIEMAP
4642 /* So fiemap access checks don't overflow on 32 bit systems.
4643  * This is very slightly smaller than the limit imposed by
4644  * the underlying kernel.
4645  */
4646 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4647                             / sizeof(struct fiemap_extent))
4648 
4649 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4650                                        int fd, int cmd, abi_long arg)
4651 {
4652     /* The parameter for this ioctl is a struct fiemap followed
4653      * by an array of struct fiemap_extent whose size is set
4654      * in fiemap->fm_extent_count. The array is filled in by the
4655      * ioctl.
4656      */
4657     int target_size_in, target_size_out;
4658     struct fiemap *fm;
4659     const argtype *arg_type = ie->arg_type;
4660     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4661     void *argptr, *p;
4662     abi_long ret;
4663     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4664     uint32_t outbufsz;
4665     int free_fm = 0;
4666 
4667     assert(arg_type[0] == TYPE_PTR);
4668     assert(ie->access == IOC_RW);
4669     arg_type++;
4670     target_size_in = thunk_type_size(arg_type, 0);
4671     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4672     if (!argptr) {
4673         return -TARGET_EFAULT;
4674     }
4675     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4676     unlock_user(argptr, arg, 0);
4677     fm = (struct fiemap *)buf_temp;
4678     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4679         return -TARGET_EINVAL;
4680     }
4681 
4682     outbufsz = sizeof (*fm) +
4683         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4684 
4685     if (outbufsz > MAX_STRUCT_SIZE) {
4686         /* We can't fit all the extents into the fixed size buffer.
4687          * Allocate one that is large enough and use it instead.
4688          */
4689         fm = g_try_malloc(outbufsz);
4690         if (!fm) {
4691             return -TARGET_ENOMEM;
4692         }
4693         memcpy(fm, buf_temp, sizeof(struct fiemap));
4694         free_fm = 1;
4695     }
4696     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4697     if (!is_error(ret)) {
4698         target_size_out = target_size_in;
4699         /* An extent_count of 0 means we were only counting the extents
4700          * so there are no structs to copy
4701          */
4702         if (fm->fm_extent_count != 0) {
4703             target_size_out += fm->fm_mapped_extents * extent_size;
4704         }
4705         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4706         if (!argptr) {
4707             ret = -TARGET_EFAULT;
4708         } else {
4709             /* Convert the struct fiemap */
4710             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4711             if (fm->fm_extent_count != 0) {
4712                 p = argptr + target_size_in;
4713                 /* ...and then all the struct fiemap_extents */
4714                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4715                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4716                                   THUNK_TARGET);
4717                     p += extent_size;
4718                 }
4719             }
4720             unlock_user(argptr, arg, target_size_out);
4721         }
4722     }
4723     if (free_fm) {
4724         g_free(fm);
4725     }
4726     return ret;
4727 }
4728 #endif
4729 
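/*
 * SIOCGIFCONF: the target and host struct ifreq layouts can differ in
 * size, so ifc_len is rescaled in both directions and every returned
 * entry is converted individually before being copied back to the guest.
 */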
4730 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4731                                 int fd, int cmd, abi_long arg)
4732 {
4733     const argtype *arg_type = ie->arg_type;
4734     int target_size;
4735     void *argptr;
4736     int ret;
4737     struct ifconf *host_ifconf;
4738     uint32_t outbufsz;
4739     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4740     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4741     int target_ifreq_size;
4742     int nb_ifreq;
4743     int free_buf = 0;
4744     int i;
4745     int target_ifc_len;
4746     abi_long target_ifc_buf;
4747     int host_ifc_len;
4748     char *host_ifc_buf;
4749 
4750     assert(arg_type[0] == TYPE_PTR);
4751     assert(ie->access == IOC_RW);
4752 
4753     arg_type++;
4754     target_size = thunk_type_size(arg_type, 0);
4755 
4756     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4757     if (!argptr)
4758         return -TARGET_EFAULT;
4759     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4760     unlock_user(argptr, arg, 0);
4761 
4762     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4763     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4764     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4765 
4766     if (target_ifc_buf != 0) {
4767         target_ifc_len = host_ifconf->ifc_len;
4768         nb_ifreq = target_ifc_len / target_ifreq_size;
4769         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4770 
4771         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4772         if (outbufsz > MAX_STRUCT_SIZE) {
4773             /*
4774              * We can't fit all the ifreq entries into the fixed size buffer.
4775              * Allocate one that is large enough and use it instead.
4776              */
4777             host_ifconf = g_try_malloc(outbufsz);
4778             if (!host_ifconf) {
4779                 return -TARGET_ENOMEM;
4780             }
4781             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4782             free_buf = 1;
4783         }
4784         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4785 
4786         host_ifconf->ifc_len = host_ifc_len;
4787     } else {
4788         host_ifc_buf = NULL;
4789     }
4790     host_ifconf->ifc_buf = host_ifc_buf;
4791 
4792     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4793     if (!is_error(ret)) {
4794         /* convert host ifc_len to target ifc_len */
4795 
4796         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4797         target_ifc_len = nb_ifreq * target_ifreq_size;
4798         host_ifconf->ifc_len = target_ifc_len;
4799 
4800         /* restore target ifc_buf */
4801 
4802         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4803 
4804         /* copy struct ifconf to target user */
4805 
4805 
4806         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4807         if (!argptr)
4808             return -TARGET_EFAULT;
4809         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4810         unlock_user(argptr, arg, target_size);
4811 
4812         if (target_ifc_buf != 0) {
4813             /* copy ifreq[] to target user */
4814             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4815             for (i = 0; i < nb_ifreq ; i++) {
4816                 thunk_convert(argptr + i * target_ifreq_size,
4817                               host_ifc_buf + i * sizeof(struct ifreq),
4818                               ifreq_arg_type, THUNK_TARGET);
4819             }
4820             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4821         }
4822     }
4823 
4824     if (free_buf) {
4825         g_free(host_ifconf);
4826     }
4827 
4828     return ret;
4829 }
4830 
4831 #if defined(CONFIG_USBFS)
4832 #if HOST_LONG_BITS > 64
4833 #error USBDEVFS thunks do not support >64 bit hosts yet.
4834 #endif
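/*
 * Bookkeeping for an in-flight asynchronous USB request: the guest
 * addresses of the target URB and of its data buffer are kept next to the
 * host URB so that USBDEVFS_REAPURB and USBDEVFS_DISCARDURB can translate
 * the kernel's answer back into guest terms.
 */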
4835 struct live_urb {
4836     uint64_t target_urb_adr;
4837     uint64_t target_buf_adr;
4838     char *target_buf_ptr;
4839     struct usbdevfs_urb host_urb;
4840 };
4841 
4842 static GHashTable *usbdevfs_urb_hashtable(void)
4843 {
4844     static GHashTable *urb_hashtable;
4845 
4846     if (!urb_hashtable) {
4847         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4848     }
4849     return urb_hashtable;
4850 }
4851 
4852 static void urb_hashtable_insert(struct live_urb *urb)
4853 {
4854     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4855     g_hash_table_insert(urb_hashtable, urb, urb);
4856 }
4857 
4858 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4859 {
4860     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4861     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4862 }
4863 
4864 static void urb_hashtable_remove(struct live_urb *urb)
4865 {
4866     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4867     g_hash_table_remove(urb_hashtable, urb);
4868 }
4869 
4870 static abi_long
4871 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4872                           int fd, int cmd, abi_long arg)
4873 {
4874     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4875     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4876     struct live_urb *lurb;
4877     void *argptr;
4878     uint64_t hurb;
4879     int target_size;
4880     uintptr_t target_urb_adr;
4881     abi_long ret;
4882 
4883     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4884 
4885     memset(buf_temp, 0, sizeof(uint64_t));
4886     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4887     if (is_error(ret)) {
4888         return ret;
4889     }
4890 
4891     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4892     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4893     if (!lurb->target_urb_adr) {
4894         return -TARGET_EFAULT;
4895     }
4896     urb_hashtable_remove(lurb);
4897     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4898         lurb->host_urb.buffer_length);
4899     lurb->target_buf_ptr = NULL;
4900 
4901     /* restore the guest buffer pointer */
4902     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4903 
4904     /* update the guest urb struct */
4905     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4906     if (!argptr) {
4907         g_free(lurb);
4908         return -TARGET_EFAULT;
4909     }
4910     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4911     unlock_user(argptr, lurb->target_urb_adr, target_size);
4912 
4913     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4914     /* write back the urb handle */
4915     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4916     if (!argptr) {
4917         g_free(lurb);
4918         return -TARGET_EFAULT;
4919     }
4920 
4921     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4922     target_urb_adr = lurb->target_urb_adr;
4923     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4924     unlock_user(argptr, arg, target_size);
4925 
4926     g_free(lurb);
4927     return ret;
4928 }
4929 
4930 static abi_long
4931 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4932                              uint8_t *buf_temp __attribute__((unused)),
4933                              int fd, int cmd, abi_long arg)
4934 {
4935     struct live_urb *lurb;
4936 
4937     /* map target address back to host URB with metadata. */
4938     lurb = urb_hashtable_lookup(arg);
4939     if (!lurb) {
4940         return -TARGET_EFAULT;
4941     }
4942     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4943 }
4944 
4945 static abi_long
4946 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4947                             int fd, int cmd, abi_long arg)
4948 {
4949     const argtype *arg_type = ie->arg_type;
4950     int target_size;
4951     abi_long ret;
4952     void *argptr;
4953     int rw_dir;
4954     struct live_urb *lurb;
4955 
4956     /*
4957      * Each submitted URB needs to map to a unique ID for the
4958      * kernel, and that unique ID needs to be a pointer to
4959      * host memory.  Hence, we need to malloc for each URB.
4960      * Isochronous transfers have a variable length struct.
4961      */
4962     arg_type++;
4963     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4964 
4965     /* construct host copy of urb and metadata */
4966     lurb = g_try_new0(struct live_urb, 1);
4967     if (!lurb) {
4968         return -TARGET_ENOMEM;
4969     }
4970 
4971     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4972     if (!argptr) {
4973         g_free(lurb);
4974         return -TARGET_EFAULT;
4975     }
4976     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4977     unlock_user(argptr, arg, 0);
4978 
4979     lurb->target_urb_adr = arg;
4980     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4981 
4982     /* buffer space used depends on endpoint type so lock the entire buffer */
4983     /* control type urbs should check the buffer contents for true direction */
4984     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4985     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4986         lurb->host_urb.buffer_length, 1);
4987     if (lurb->target_buf_ptr == NULL) {
4988         g_free(lurb);
4989         return -TARGET_EFAULT;
4990     }
4991 
4992     /* update buffer pointer in host copy */
4993     lurb->host_urb.buffer = lurb->target_buf_ptr;
4994 
4995     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4996     if (is_error(ret)) {
4997         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4998         g_free(lurb);
4999     } else {
5000         urb_hashtable_insert(lurb);
5001     }
5002 
5003     return ret;
5004 }
5005 #endif /* CONFIG_USBFS */
5006 
5007 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5008                             int cmd, abi_long arg)
5009 {
5010     void *argptr;
5011     struct dm_ioctl *host_dm;
5012     abi_long guest_data;
5013     uint32_t guest_data_size;
5014     int target_size;
5015     const argtype *arg_type = ie->arg_type;
5016     abi_long ret;
5017     void *big_buf = NULL;
5018     char *host_data;
5019 
5020     arg_type++;
5021     target_size = thunk_type_size(arg_type, 0);
5022     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5023     if (!argptr) {
5024         ret = -TARGET_EFAULT;
5025         goto out;
5026     }
5027     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5028     unlock_user(argptr, arg, 0);
5029 
5030     /* buf_temp is too small, so fetch things into a bigger buffer */
5031     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5032     memcpy(big_buf, buf_temp, target_size);
5033     buf_temp = big_buf;
5034     host_dm = big_buf;
5035 
5036     guest_data = arg + host_dm->data_start;
5037     if ((guest_data - arg) < 0) {
5038         ret = -TARGET_EINVAL;
5039         goto out;
5040     }
5041     guest_data_size = host_dm->data_size - host_dm->data_start;
5042     host_data = (char*)host_dm + host_dm->data_start;
5043 
5044     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5045     if (!argptr) {
5046         ret = -TARGET_EFAULT;
5047         goto out;
5048     }
5049 
5050     switch (ie->host_cmd) {
5051     case DM_REMOVE_ALL:
5052     case DM_LIST_DEVICES:
5053     case DM_DEV_CREATE:
5054     case DM_DEV_REMOVE:
5055     case DM_DEV_SUSPEND:
5056     case DM_DEV_STATUS:
5057     case DM_DEV_WAIT:
5058     case DM_TABLE_STATUS:
5059     case DM_TABLE_CLEAR:
5060     case DM_TABLE_DEPS:
5061     case DM_LIST_VERSIONS:
5062         /* no input data */
5063         break;
5064     case DM_DEV_RENAME:
5065     case DM_DEV_SET_GEOMETRY:
5066         /* data contains only strings */
5067         memcpy(host_data, argptr, guest_data_size);
5068         break;
5069     case DM_TARGET_MSG:
5070         memcpy(host_data, argptr, guest_data_size);
5071         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5072         break;
5073     case DM_TABLE_LOAD:
5074     {
5075         void *gspec = argptr;
5076         void *cur_data = host_data;
5077         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5078         int spec_size = thunk_type_size(dm_arg_type, 0);
5079         int i;
5080 
5081         for (i = 0; i < host_dm->target_count; i++) {
5082             struct dm_target_spec *spec = cur_data;
5083             uint32_t next;
5084             int slen;
5085 
5086             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5087             slen = strlen((char*)gspec + spec_size) + 1;
5088             next = spec->next;
5089             spec->next = sizeof(*spec) + slen;
5090             strcpy((char*)&spec[1], gspec + spec_size);
5091             gspec += next;
5092             cur_data += spec->next;
5093         }
5094         break;
5095     }
5096     default:
5097         ret = -TARGET_EINVAL;
5098         unlock_user(argptr, guest_data, 0);
5099         goto out;
5100     }
5101     unlock_user(argptr, guest_data, 0);
5102 
5103     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5104     if (!is_error(ret)) {
5105         guest_data = arg + host_dm->data_start;
5106         guest_data_size = host_dm->data_size - host_dm->data_start;
5107         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5108         switch (ie->host_cmd) {
5109         case DM_REMOVE_ALL:
5110         case DM_DEV_CREATE:
5111         case DM_DEV_REMOVE:
5112         case DM_DEV_RENAME:
5113         case DM_DEV_SUSPEND:
5114         case DM_DEV_STATUS:
5115         case DM_TABLE_LOAD:
5116         case DM_TABLE_CLEAR:
5117         case DM_TARGET_MSG:
5118         case DM_DEV_SET_GEOMETRY:
5119             /* no return data */
5120             break;
5121         case DM_LIST_DEVICES:
5122         {
5123             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5124             uint32_t remaining_data = guest_data_size;
5125             void *cur_data = argptr;
5126             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5127             int nl_size = 12; /* can't use thunk_size due to alignment */
5128 
5129             while (1) {
5130                 uint32_t next = nl->next;
5131                 if (next) {
5132                     nl->next = nl_size + (strlen(nl->name) + 1);
5133                 }
5134                 if (remaining_data < nl->next) {
5135                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5136                     break;
5137                 }
5138                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5139                 strcpy(cur_data + nl_size, nl->name);
5140                 cur_data += nl->next;
5141                 remaining_data -= nl->next;
5142                 if (!next) {
5143                     break;
5144                 }
5145                 nl = (void*)nl + next;
5146             }
5147             break;
5148         }
5149         case DM_DEV_WAIT:
5150         case DM_TABLE_STATUS:
5151         {
5152             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5153             void *cur_data = argptr;
5154             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5155             int spec_size = thunk_type_size(dm_arg_type, 0);
5156             int i;
5157 
5158             for (i = 0; i < host_dm->target_count; i++) {
5159                 uint32_t next = spec->next;
5160                 int slen = strlen((char*)&spec[1]) + 1;
5161                 spec->next = (cur_data - argptr) + spec_size + slen;
5162                 if (guest_data_size < spec->next) {
5163                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5164                     break;
5165                 }
5166                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5167                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5168                 cur_data = argptr + spec->next;
5169                 spec = (void*)host_dm + host_dm->data_start + next;
5170             }
5171             break;
5172         }
5173         case DM_TABLE_DEPS:
5174         {
5175             void *hdata = (void*)host_dm + host_dm->data_start;
5176             int count = *(uint32_t*)hdata;
5177             uint64_t *hdev = hdata + 8;
5178             uint64_t *gdev = argptr + 8;
5179             int i;
5180 
5181             *(uint32_t*)argptr = tswap32(count);
5182             for (i = 0; i < count; i++) {
5183                 *gdev = tswap64(*hdev);
5184                 gdev++;
5185                 hdev++;
5186             }
5187             break;
5188         }
5189         case DM_LIST_VERSIONS:
5190         {
5191             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5192             uint32_t remaining_data = guest_data_size;
5193             void *cur_data = argptr;
5194             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5195             int vers_size = thunk_type_size(dm_arg_type, 0);
5196 
5197             while (1) {
5198                 uint32_t next = vers->next;
5199                 if (next) {
5200                     vers->next = vers_size + (strlen(vers->name) + 1);
5201                 }
5202                 if (remaining_data < vers->next) {
5203                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5204                     break;
5205                 }
5206                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5207                 strcpy(cur_data + vers_size, vers->name);
5208                 cur_data += vers->next;
5209                 remaining_data -= vers->next;
5210                 if (!next) {
5211                     break;
5212                 }
5213                 vers = (void*)vers + next;
5214             }
5215             break;
5216         }
5217         default:
5218             unlock_user(argptr, guest_data, 0);
5219             ret = -TARGET_EINVAL;
5220             goto out;
5221         }
5222         unlock_user(argptr, guest_data, guest_data_size);
5223 
5224         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5225         if (!argptr) {
5226             ret = -TARGET_EFAULT;
5227             goto out;
5228         }
5229         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5230         unlock_user(argptr, arg, target_size);
5231     }
5232 out:
5233     g_free(big_buf);
5234     return ret;
5235 }
5236 
5237 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5238                                int cmd, abi_long arg)
5239 {
5240     void *argptr;
5241     int target_size;
5242     const argtype *arg_type = ie->arg_type;
5243     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5244     abi_long ret;
5245 
5246     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5247     struct blkpg_partition host_part;
5248 
5249     /* Read and convert blkpg */
5250     arg_type++;
5251     target_size = thunk_type_size(arg_type, 0);
5252     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5253     if (!argptr) {
5254         ret = -TARGET_EFAULT;
5255         goto out;
5256     }
5257     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5258     unlock_user(argptr, arg, 0);
5259 
5260     switch (host_blkpg->op) {
5261     case BLKPG_ADD_PARTITION:
5262     case BLKPG_DEL_PARTITION:
5263         /* payload is struct blkpg_partition */
5264         break;
5265     default:
5266         /* Unknown opcode */
5267         ret = -TARGET_EINVAL;
5268         goto out;
5269     }
5270 
5271     /* Read and convert blkpg->data */
5272     arg = (abi_long)(uintptr_t)host_blkpg->data;
5273     target_size = thunk_type_size(part_arg_type, 0);
5274     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5275     if (!argptr) {
5276         ret = -TARGET_EFAULT;
5277         goto out;
5278     }
5279     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5280     unlock_user(argptr, arg, 0);
5281 
5282     /* Swizzle the data pointer to our local copy and call! */
5283     host_blkpg->data = &host_part;
5284     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5285 
5286 out:
5287     return ret;
5288 }
5289 
5290 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5291                                 int fd, int cmd, abi_long arg)
5292 {
5293     const argtype *arg_type = ie->arg_type;
5294     const StructEntry *se;
5295     const argtype *field_types;
5296     const int *dst_offsets, *src_offsets;
5297     int target_size;
5298     void *argptr;
5299     abi_ulong *target_rt_dev_ptr = NULL;
5300     unsigned long *host_rt_dev_ptr = NULL;
5301     abi_long ret;
5302     int i;
5303 
5304     assert(ie->access == IOC_W);
5305     assert(*arg_type == TYPE_PTR);
5306     arg_type++;
5307     assert(*arg_type == TYPE_STRUCT);
5308     target_size = thunk_type_size(arg_type, 0);
5309     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5310     if (!argptr) {
5311         return -TARGET_EFAULT;
5312     }
5313     arg_type++;
5314     assert(*arg_type == (int)STRUCT_rtentry);
5315     se = struct_entries + *arg_type++;
5316     assert(se->convert[0] == NULL);
5317     /* Convert the struct here so we can intercept the rt_dev string pointer. */
5318     field_types = se->field_types;
5319     dst_offsets = se->field_offsets[THUNK_HOST];
5320     src_offsets = se->field_offsets[THUNK_TARGET];
5321     for (i = 0; i < se->nb_fields; i++) {
5322         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5323             assert(*field_types == TYPE_PTRVOID);
5324             target_rt_dev_ptr = argptr + src_offsets[i];
5325             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5326             if (*target_rt_dev_ptr != 0) {
5327                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5328                                                   tswapal(*target_rt_dev_ptr));
5329                 if (!*host_rt_dev_ptr) {
5330                     unlock_user(argptr, arg, 0);
5331                     return -TARGET_EFAULT;
5332                 }
5333             } else {
5334                 *host_rt_dev_ptr = 0;
5335             }
5336             field_types++;
5337             continue;
5338         }
5339         field_types = thunk_convert(buf_temp + dst_offsets[i],
5340                                     argptr + src_offsets[i],
5341                                     field_types, THUNK_HOST);
5342     }
5343     unlock_user(argptr, arg, 0);
5344 
5345     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5346 
5347     assert(host_rt_dev_ptr != NULL);
5348     assert(target_rt_dev_ptr != NULL);
5349     if (*host_rt_dev_ptr != 0) {
5350         unlock_user((void *)*host_rt_dev_ptr,
5351                     *target_rt_dev_ptr, 0);
5352     }
5353     return ret;
5354 }
5355 
5356 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5357                                      int fd, int cmd, abi_long arg)
5358 {
5359     int sig = target_to_host_signal(arg);
5360     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5361 }
5362 
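/*
 * SIOCGSTAMP exists in an _OLD variant that uses the target's native
 * struct timeval and a newer variant with a fixed 64-bit layout; the
 * target command number selects which conversion is applied on the way
 * back to the guest.
 */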
5363 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5364                                     int fd, int cmd, abi_long arg)
5365 {
5366     struct timeval tv;
5367     abi_long ret;
5368 
5369     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5370     if (is_error(ret)) {
5371         return ret;
5372     }
5373 
5374     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5375         if (copy_to_user_timeval(arg, &tv)) {
5376             return -TARGET_EFAULT;
5377         }
5378     } else {
5379         if (copy_to_user_timeval64(arg, &tv)) {
5380             return -TARGET_EFAULT;
5381         }
5382     }
5383 
5384     return ret;
5385 }
5386 
5387 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5388                                       int fd, int cmd, abi_long arg)
5389 {
5390     struct timespec ts;
5391     abi_long ret;
5392 
5393     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5394     if (is_error(ret)) {
5395         return ret;
5396     }
5397 
5398     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5399         if (host_to_target_timespec(arg, &ts)) {
5400             return -TARGET_EFAULT;
5401         }
5402     } else {
5403         if (host_to_target_timespec64(arg, &ts)) {
5404             return -TARGET_EFAULT;
5405         }
5406     }
5407 
5408     return ret;
5409 }
5410 
5411 #ifdef TIOCGPTPEER
5412 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5413                                      int fd, int cmd, abi_long arg)
5414 {
5415     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5416     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5417 }
5418 #endif
5419 
5420 #ifdef HAVE_DRM_H
5421 
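/*
 * DRM_IOCTL_VERSION returns three variable-length strings (name, date and
 * desc).  The guest-supplied buffers are locked for the duration of the
 * ioctl and, on success, the returned lengths determine how much data is
 * copied back.
 */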
5422 static void unlock_drm_version(struct drm_version *host_ver,
5423                                struct target_drm_version *target_ver,
5424                                bool copy)
5425 {
5426     unlock_user(host_ver->name, target_ver->name,
5427                                 copy ? host_ver->name_len : 0);
5428     unlock_user(host_ver->date, target_ver->date,
5429                                 copy ? host_ver->date_len : 0);
5430     unlock_user(host_ver->desc, target_ver->desc,
5431                                 copy ? host_ver->desc_len : 0);
5432 }
5433 
5434 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5435                                           struct target_drm_version *target_ver)
5436 {
5437     memset(host_ver, 0, sizeof(*host_ver));
5438 
5439     __get_user(host_ver->name_len, &target_ver->name_len);
5440     if (host_ver->name_len) {
5441         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5442                                    target_ver->name_len, 0);
5443         if (!host_ver->name) {
5444             return -EFAULT;
5445         }
5446     }
5447 
5448     __get_user(host_ver->date_len, &target_ver->date_len);
5449     if (host_ver->date_len) {
5450         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5451                                    target_ver->date_len, 0);
5452         if (!host_ver->date) {
5453             goto err;
5454         }
5455     }
5456 
5457     __get_user(host_ver->desc_len, &target_ver->desc_len);
5458     if (host_ver->desc_len) {
5459         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5460                                    target_ver->desc_len, 0);
5461         if (!host_ver->desc) {
5462             goto err;
5463         }
5464     }
5465 
5466     return 0;
5467 err:
5468     unlock_drm_version(host_ver, target_ver, false);
5469     return -EFAULT;
5470 }
5471 
5472 static inline void host_to_target_drmversion(
5473                                           struct target_drm_version *target_ver,
5474                                           struct drm_version *host_ver)
5475 {
5476     __put_user(host_ver->version_major, &target_ver->version_major);
5477     __put_user(host_ver->version_minor, &target_ver->version_minor);
5478     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5479     __put_user(host_ver->name_len, &target_ver->name_len);
5480     __put_user(host_ver->date_len, &target_ver->date_len);
5481     __put_user(host_ver->desc_len, &target_ver->desc_len);
5482     unlock_drm_version(host_ver, target_ver, true);
5483 }
5484 
5485 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5486                              int fd, int cmd, abi_long arg)
5487 {
5488     struct drm_version *ver;
5489     struct target_drm_version *target_ver;
5490     abi_long ret;
5491 
5492     switch (ie->host_cmd) {
5493     case DRM_IOCTL_VERSION:
5494         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5495             return -TARGET_EFAULT;
5496         }
5497         ver = (struct drm_version *)buf_temp;
5498         ret = target_to_host_drmversion(ver, target_ver);
5499         if (!is_error(ret)) {
5500             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5501             if (is_error(ret)) {
5502                 unlock_drm_version(ver, target_ver, false);
5503             } else {
5504                 host_to_target_drmversion(target_ver, ver);
5505             }
5506         }
5507         unlock_user_struct(target_ver, arg, 0);
5508         return ret;
5509     }
5510     return -TARGET_ENOSYS;
5511 }
5512 
5513 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5514                                            struct drm_i915_getparam *gparam,
5515                                            int fd, abi_long arg)
5516 {
5517     abi_long ret;
5518     int value;
5519     struct target_drm_i915_getparam *target_gparam;
5520 
5521     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5522         return -TARGET_EFAULT;
5523     }
5524 
5525     __get_user(gparam->param, &target_gparam->param);
5526     gparam->value = &value;
5527     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5528     put_user_s32(value, target_gparam->value);
5529 
5530     unlock_user_struct(target_gparam, arg, 0);
5531     return ret;
5532 }
5533 
5534 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5535                                   int fd, int cmd, abi_long arg)
5536 {
5537     switch (ie->host_cmd) {
5538     case DRM_IOCTL_I915_GETPARAM:
5539         return do_ioctl_drm_i915_getparam(ie,
5540                                           (struct drm_i915_getparam *)buf_temp,
5541                                           fd, arg);
5542     default:
5543         return -TARGET_ENOSYS;
5544     }
5545 }
5546 
5547 #endif
5548 
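/*
 * TUNSETTXFILTER takes a variable-length struct tun_filter: the fixed
 * header is converted first and then count * ETH_ALEN address bytes are
 * copied verbatim, with the total bounded by MAX_STRUCT_SIZE.
 */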
5549 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5550                                         int fd, int cmd, abi_long arg)
5551 {
5552     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5553     struct tun_filter *target_filter;
5554     char *target_addr;
5555 
5556     assert(ie->access == IOC_W);
5557 
5558     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5559     if (!target_filter) {
5560         return -TARGET_EFAULT;
5561     }
5562     filter->flags = tswap16(target_filter->flags);
5563     filter->count = tswap16(target_filter->count);
5564     unlock_user(target_filter, arg, 0);
5565 
5566     if (filter->count) {
5567         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5568             MAX_STRUCT_SIZE) {
5569             return -TARGET_EFAULT;
5570         }
5571 
5572         target_addr = lock_user(VERIFY_READ,
5573                                 arg + offsetof(struct tun_filter, addr),
5574                                 filter->count * ETH_ALEN, 1);
5575         if (!target_addr) {
5576             return -TARGET_EFAULT;
5577         }
5578         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5579         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5580     }
5581 
5582     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5583 }
5584 
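/*
 * Table of supported ioctls, generated from ioctls.h.  An entry declared
 * there as IOCTL(FOO, IOC_W, MK_PTR(TYPE_INT)) expands to
 * { TARGET_FOO, FOO, "FOO", IOC_W, 0, { MK_PTR(TYPE_INT) } }, while
 * IOCTL_SPECIAL() additionally names a do_ioctl_*() handler and
 * IOCTL_IGNORE() records a target command with no host counterpart.
 */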
5585 IOCTLEntry ioctl_entries[] = {
5586 #define IOCTL(cmd, access, ...) \
5587     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5588 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5589     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5590 #define IOCTL_IGNORE(cmd) \
5591     { TARGET_ ## cmd, 0, #cmd },
5592 #include "ioctls.h"
5593     { 0, 0, },
5594 };
5595 
5596 /* ??? Implement proper locking for ioctls.  */
5597 /* do_ioctl() must return target values and target errnos. */
5598 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5599 {
5600     const IOCTLEntry *ie;
5601     const argtype *arg_type;
5602     abi_long ret;
5603     uint8_t buf_temp[MAX_STRUCT_SIZE];
5604     int target_size;
5605     void *argptr;
5606 
5607     ie = ioctl_entries;
5608     for(;;) {
5609         if (ie->target_cmd == 0) {
5610             qemu_log_mask(
5611                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5612             return -TARGET_ENOTTY;
5613         }
5614         if (ie->target_cmd == cmd)
5615             break;
5616         ie++;
5617     }
5618     arg_type = ie->arg_type;
5619     if (ie->do_ioctl) {
5620         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5621     } else if (!ie->host_cmd) {
5622         /* Some architectures define BSD ioctls in their headers
5623            that are not implemented in Linux.  */
5624         return -TARGET_ENOTTY;
5625     }
5626 
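    /*
     * Generic path: the argument is converted according to its thunk type
     * description, honouring the declared direction (IOC_W converts
     * target to host before the call, IOC_R converts host to target
     * afterwards, IOC_RW does both).
     */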
5627     switch(arg_type[0]) {
5628     case TYPE_NULL:
5629         /* no argument */
5630         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5631         break;
5632     case TYPE_PTRVOID:
5633     case TYPE_INT:
5634     case TYPE_LONG:
5635     case TYPE_ULONG:
5636         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5637         break;
5638     case TYPE_PTR:
5639         arg_type++;
5640         target_size = thunk_type_size(arg_type, 0);
5641         switch(ie->access) {
5642         case IOC_R:
5643             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5644             if (!is_error(ret)) {
5645                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5646                 if (!argptr)
5647                     return -TARGET_EFAULT;
5648                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5649                 unlock_user(argptr, arg, target_size);
5650             }
5651             break;
5652         case IOC_W:
5653             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5654             if (!argptr)
5655                 return -TARGET_EFAULT;
5656             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5657             unlock_user(argptr, arg, 0);
5658             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5659             break;
5660         default:
5661         case IOC_RW:
5662             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5663             if (!argptr)
5664                 return -TARGET_EFAULT;
5665             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5666             unlock_user(argptr, arg, 0);
5667             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5668             if (!is_error(ret)) {
5669                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5670                 if (!argptr)
5671                     return -TARGET_EFAULT;
5672                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5673                 unlock_user(argptr, arg, target_size);
5674             }
5675             break;
5676         }
5677         break;
5678     default:
5679         qemu_log_mask(LOG_UNIMP,
5680                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5681                       (long)cmd, arg_type[0]);
5682         ret = -TARGET_ENOTTY;
5683         break;
5684     }
5685     return ret;
5686 }
5687 
5688 static const bitmask_transtbl iflag_tbl[] = {
5689         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5690         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5691         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5692         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5693         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5694         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5695         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5696         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5697         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5698         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5699         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5700         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5701         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5702         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5703         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5704 };
5705 
5706 static const bitmask_transtbl oflag_tbl[] = {
5707 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5708 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5709 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5710 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5711 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5712 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5713 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5714 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5715 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5716 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5717 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5718 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5719 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5720 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5721 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5722 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5723 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5724 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5725 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5726 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5727 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5728 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5729 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5730 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5731 };
5732 
5733 static const bitmask_transtbl cflag_tbl[] = {
5734 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5735 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5736 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5737 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5738 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5739 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5740 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5741 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5742 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5743 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5744 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5745 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5746 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5747 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5748 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5749 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5750 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5751 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5752 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5753 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5754 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5755 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5756 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5757 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5758 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5759 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5760 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5761 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5762 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5763 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5764 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5765 };
5766 
5767 static const bitmask_transtbl lflag_tbl[] = {
5768   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5769   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5770   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5771   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5772   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5773   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5774   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5775   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5776   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5777   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5778   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5779   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5780   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5781   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5782   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5783   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5784 };
5785 
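/*
 * Termios conversion: the flag words are translated bit group by bit
 * group via the tables above, while the c_cc control characters are
 * copied one by one because the TARGET_V* indices need not match the
 * host's V* values.
 */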
5786 static void target_to_host_termios (void *dst, const void *src)
5787 {
5788     struct host_termios *host = dst;
5789     const struct target_termios *target = src;
5790 
5791     host->c_iflag =
5792         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5793     host->c_oflag =
5794         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5795     host->c_cflag =
5796         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5797     host->c_lflag =
5798         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5799     host->c_line = target->c_line;
5800 
5801     memset(host->c_cc, 0, sizeof(host->c_cc));
5802     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5803     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5804     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5805     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5806     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5807     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5808     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5809     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5810     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5811     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5812     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5813     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5814     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5815     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5816     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5817     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5818     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5819 }
5820 
5821 static void host_to_target_termios (void *dst, const void *src)
5822 {
5823     struct target_termios *target = dst;
5824     const struct host_termios *host = src;
5825 
5826     target->c_iflag =
5827         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5828     target->c_oflag =
5829         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5830     target->c_cflag =
5831         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5832     target->c_lflag =
5833         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5834     target->c_line = host->c_line;
5835 
5836     memset(target->c_cc, 0, sizeof(target->c_cc));
5837     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5838     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5839     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5840     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5841     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5842     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5843     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5844     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5845     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5846     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5847     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5848     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5849     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5850     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5851     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5852     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5853     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5854 }
5855 
5856 static const StructEntry struct_termios_def = {
5857     .convert = { host_to_target_termios, target_to_host_termios },
5858     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5859     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5860     .print = print_termios,
5861 };
5862 
5863 /* If the host does not provide these bits, they may be safely discarded. */
5864 #ifndef MAP_SYNC
5865 #define MAP_SYNC 0
5866 #endif
5867 #ifndef MAP_UNINITIALIZED
5868 #define MAP_UNINITIALIZED 0
5869 #endif
5870 
5871 static const bitmask_transtbl mmap_flags_tbl[] = {
5872     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5873     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5874       MAP_ANONYMOUS, MAP_ANONYMOUS },
5875     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5876       MAP_GROWSDOWN, MAP_GROWSDOWN },
5877     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5878       MAP_DENYWRITE, MAP_DENYWRITE },
5879     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5880       MAP_EXECUTABLE, MAP_EXECUTABLE },
5881     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5882     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5883       MAP_NORESERVE, MAP_NORESERVE },
5884     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5885     /* MAP_STACK had been ignored by the kernel for quite some time.
5886        Recognize it for the target insofar as we do not want to pass
5887        it through to the host.  */
5888     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5889     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5890     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5891     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5892       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5893     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5894       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5895 };
5896 
5897 /*
5898  * Arrange for legacy / undefined architecture specific flags to be
5899  * ignored by mmap handling code.
5900  */
5901 #ifndef TARGET_MAP_32BIT
5902 #define TARGET_MAP_32BIT 0
5903 #endif
5904 #ifndef TARGET_MAP_HUGE_2MB
5905 #define TARGET_MAP_HUGE_2MB 0
5906 #endif
5907 #ifndef TARGET_MAP_HUGE_1GB
5908 #define TARGET_MAP_HUGE_1GB 0
5909 #endif
5910 
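/*
 * Translate the target's mmap flags into host flags: the mapping type
 * (private, shared or shared-validate) is handled explicitly, everything
 * else goes through mmap_flags_tbl, and for MAP_SHARED_VALIDATE any flag
 * outside TARGET_LEGACY_MAP_MASK (plus MAP_SYNC) is rejected.
 */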
5911 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5912                         int target_flags, int fd, off_t offset)
5913 {
5914     /*
5915      * The historical set of flags that all mmap types implicitly support.
5916      */
5917     enum {
5918         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5919                                | TARGET_MAP_PRIVATE
5920                                | TARGET_MAP_FIXED
5921                                | TARGET_MAP_ANONYMOUS
5922                                | TARGET_MAP_DENYWRITE
5923                                | TARGET_MAP_EXECUTABLE
5924                                | TARGET_MAP_UNINITIALIZED
5925                                | TARGET_MAP_GROWSDOWN
5926                                | TARGET_MAP_LOCKED
5927                                | TARGET_MAP_NORESERVE
5928                                | TARGET_MAP_POPULATE
5929                                | TARGET_MAP_NONBLOCK
5930                                | TARGET_MAP_STACK
5931                                | TARGET_MAP_HUGETLB
5932                                | TARGET_MAP_32BIT
5933                                | TARGET_MAP_HUGE_2MB
5934                                | TARGET_MAP_HUGE_1GB
5935     };
5936     int host_flags;
5937 
5938     switch (target_flags & TARGET_MAP_TYPE) {
5939     case TARGET_MAP_PRIVATE:
5940         host_flags = MAP_PRIVATE;
5941         break;
5942     case TARGET_MAP_SHARED:
5943         host_flags = MAP_SHARED;
5944         break;
5945     case TARGET_MAP_SHARED_VALIDATE:
5946         /*
5947          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5948          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5949          */
5950         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5951             return -TARGET_EOPNOTSUPP;
5952         }
5953         host_flags = MAP_SHARED_VALIDATE;
5954         if (target_flags & TARGET_MAP_SYNC) {
5955             host_flags |= MAP_SYNC;
5956         }
5957         break;
5958     default:
5959         return -TARGET_EINVAL;
5960     }
5961     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5962 
5963     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5964 }
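/*
 * Rough example of the flag translation above: a guest asking for a private
 * anonymous mapping passes TARGET_MAP_PRIVATE | TARGET_MAP_ANONYMOUS; the
 * switch selects host_flags = MAP_PRIVATE and target_to_host_bitmask() then
 * ORs in MAP_ANONYMOUS from mmap_flags_tbl.  The mapping type is handled by
 * the switch rather than the table because TARGET_MAP_TYPE is an enumerated
 * field (PRIVATE/SHARED/SHARED_VALIDATE), not a set of independent flag bits.
 */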
5965 
5966 /*
5967  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5968  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5969  */
5970 #if defined(TARGET_I386)
5971 
5972 /* NOTE: there is really one LDT for all the threads */
5973 static uint8_t *ldt_table;
5974 
5975 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5976 {
5977     int size;
5978     void *p;
5979 
5980     if (!ldt_table)
5981         return 0;
5982     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5983     if (size > bytecount)
5984         size = bytecount;
5985     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5986     if (!p)
5987         return -TARGET_EFAULT;
5988     /* ??? Should this by byteswapped?  */
5989     memcpy(p, ldt_table, size);
5990     unlock_user(p, ptr, size);
5991     return size;
5992 }
5993 
5994 /* XXX: add locking support */
5995 static abi_long write_ldt(CPUX86State *env,
5996                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5997 {
5998     struct target_modify_ldt_ldt_s ldt_info;
5999     struct target_modify_ldt_ldt_s *target_ldt_info;
6000     int seg_32bit, contents, read_exec_only, limit_in_pages;
6001     int seg_not_present, useable, lm;
6002     uint32_t *lp, entry_1, entry_2;
6003 
6004     if (bytecount != sizeof(ldt_info))
6005         return -TARGET_EINVAL;
6006     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6007         return -TARGET_EFAULT;
6008     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6009     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6010     ldt_info.limit = tswap32(target_ldt_info->limit);
6011     ldt_info.flags = tswap32(target_ldt_info->flags);
6012     unlock_user_struct(target_ldt_info, ptr, 0);
6013 
6014     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6015         return -TARGET_EINVAL;
6016     seg_32bit = ldt_info.flags & 1;
6017     contents = (ldt_info.flags >> 1) & 3;
6018     read_exec_only = (ldt_info.flags >> 3) & 1;
6019     limit_in_pages = (ldt_info.flags >> 4) & 1;
6020     seg_not_present = (ldt_info.flags >> 5) & 1;
6021     useable = (ldt_info.flags >> 6) & 1;
6022 #ifdef TARGET_ABI32
6023     lm = 0;
6024 #else
6025     lm = (ldt_info.flags >> 7) & 1;
6026 #endif
6027     if (contents == 3) {
6028         if (oldmode)
6029             return -TARGET_EINVAL;
6030         if (seg_not_present == 0)
6031             return -TARGET_EINVAL;
6032     }
6033     /* allocate the LDT */
6034     if (!ldt_table) {
6035         env->ldt.base = target_mmap(0,
6036                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6037                                     PROT_READ|PROT_WRITE,
6038                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6039         if (env->ldt.base == -1)
6040             return -TARGET_ENOMEM;
6041         memset(g2h_untagged(env->ldt.base), 0,
6042                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6043         env->ldt.limit = 0xffff;
6044         ldt_table = g2h_untagged(env->ldt.base);
6045     }
6046 
6047     /* NOTE: same code as Linux kernel */
6048     /* Allow LDTs to be cleared by the user. */
6049     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6050         if (oldmode ||
6051             (contents == 0		&&
6052              read_exec_only == 1	&&
6053              seg_32bit == 0		&&
6054              limit_in_pages == 0	&&
6055              seg_not_present == 1	&&
6056              useable == 0 )) {
6057             entry_1 = 0;
6058             entry_2 = 0;
6059             goto install;
6060         }
6061     }
6062 
6063     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6064         (ldt_info.limit & 0x0ffff);
6065     entry_2 = (ldt_info.base_addr & 0xff000000) |
6066         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6067         (ldt_info.limit & 0xf0000) |
6068         ((read_exec_only ^ 1) << 9) |
6069         (contents << 10) |
6070         ((seg_not_present ^ 1) << 15) |
6071         (seg_32bit << 22) |
6072         (limit_in_pages << 23) |
6073         (lm << 21) |
6074         0x7000;
6075     if (!oldmode)
6076         entry_2 |= (useable << 20);
6077 
6078     /* Install the new entry ...  */
6079 install:
6080     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6081     lp[0] = tswap32(entry_1);
6082     lp[1] = tswap32(entry_2);
6083     return 0;
6084 }
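/*
 * For reference, entry_1/entry_2 above follow the usual x86 segment
 * descriptor layout: entry_1 holds base[15:0] in its upper half and
 * limit[15:0] in its lower half; entry_2 packs base[31:24] and base[23:16],
 * limit[19:16], the type bits derived from contents/read_exec_only, the
 * present bit (from seg_not_present), AVL (useable), L (lm), D/B (seg_32bit)
 * and granularity (limit_in_pages), with the 0x7000 constant supplying
 * S=1 and DPL=3.
 */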
6085 
6086 /* specific and weird i386 syscalls */
6087 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6088                               unsigned long bytecount)
6089 {
6090     abi_long ret;
6091 
6092     switch (func) {
6093     case 0:
6094         ret = read_ldt(ptr, bytecount);
6095         break;
6096     case 1:
6097         ret = write_ldt(env, ptr, bytecount, 1);
6098         break;
6099     case 0x11:
6100         ret = write_ldt(env, ptr, bytecount, 0);
6101         break;
6102     default:
6103         ret = -TARGET_ENOSYS;
6104         break;
6105     }
6106     return ret;
6107 }
6108 
6109 #if defined(TARGET_ABI32)
6110 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6111 {
6112     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6113     struct target_modify_ldt_ldt_s ldt_info;
6114     struct target_modify_ldt_ldt_s *target_ldt_info;
6115     int seg_32bit, contents, read_exec_only, limit_in_pages;
6116     int seg_not_present, useable, lm;
6117     uint32_t *lp, entry_1, entry_2;
6118     int i;
6119 
6120     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6121     if (!target_ldt_info)
6122         return -TARGET_EFAULT;
6123     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6124     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6125     ldt_info.limit = tswap32(target_ldt_info->limit);
6126     ldt_info.flags = tswap32(target_ldt_info->flags);
6127     if (ldt_info.entry_number == -1) {
6128         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6129             if (gdt_table[i] == 0) {
6130                 ldt_info.entry_number = i;
6131                 target_ldt_info->entry_number = tswap32(i);
6132                 break;
6133             }
6134         }
6135     }
6136     unlock_user_struct(target_ldt_info, ptr, 1);
6137 
6138     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6139         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6140            return -TARGET_EINVAL;
6141     seg_32bit = ldt_info.flags & 1;
6142     contents = (ldt_info.flags >> 1) & 3;
6143     read_exec_only = (ldt_info.flags >> 3) & 1;
6144     limit_in_pages = (ldt_info.flags >> 4) & 1;
6145     seg_not_present = (ldt_info.flags >> 5) & 1;
6146     useable = (ldt_info.flags >> 6) & 1;
6147 #ifdef TARGET_ABI32
6148     lm = 0;
6149 #else
6150     lm = (ldt_info.flags >> 7) & 1;
6151 #endif
6152 
6153     if (contents == 3) {
6154         if (seg_not_present == 0)
6155             return -TARGET_EINVAL;
6156     }
6157 
6158     /* NOTE: same code as Linux kernel */
6159     /* Allow LDTs to be cleared by the user. */
6160     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6161         if ((contents == 0             &&
6162              read_exec_only == 1       &&
6163              seg_32bit == 0            &&
6164              limit_in_pages == 0       &&
6165              seg_not_present == 1      &&
6166              useable == 0 )) {
6167             entry_1 = 0;
6168             entry_2 = 0;
6169             goto install;
6170         }
6171     }
6172 
6173     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6174         (ldt_info.limit & 0x0ffff);
6175     entry_2 = (ldt_info.base_addr & 0xff000000) |
6176         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6177         (ldt_info.limit & 0xf0000) |
6178         ((read_exec_only ^ 1) << 9) |
6179         (contents << 10) |
6180         ((seg_not_present ^ 1) << 15) |
6181         (seg_32bit << 22) |
6182         (limit_in_pages << 23) |
6183         (useable << 20) |
6184         (lm << 21) |
6185         0x7000;
6186 
6187     /* Install the new entry ...  */
6188 install:
6189     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6190     lp[0] = tswap32(entry_1);
6191     lp[1] = tswap32(entry_2);
6192     return 0;
6193 }
6194 
6195 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6196 {
6197     struct target_modify_ldt_ldt_s *target_ldt_info;
6198     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6199     uint32_t base_addr, limit, flags;
6200     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6201     int seg_not_present, useable, lm;
6202     uint32_t *lp, entry_1, entry_2;
6203 
6204     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6205     if (!target_ldt_info)
6206         return -TARGET_EFAULT;
6207     idx = tswap32(target_ldt_info->entry_number);
6208     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6209         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6210         unlock_user_struct(target_ldt_info, ptr, 1);
6211         return -TARGET_EINVAL;
6212     }
6213     lp = (uint32_t *)(gdt_table + idx);
6214     entry_1 = tswap32(lp[0]);
6215     entry_2 = tswap32(lp[1]);
6216 
6217     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6218     contents = (entry_2 >> 10) & 3;
6219     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6220     seg_32bit = (entry_2 >> 22) & 1;
6221     limit_in_pages = (entry_2 >> 23) & 1;
6222     useable = (entry_2 >> 20) & 1;
6223 #ifdef TARGET_ABI32
6224     lm = 0;
6225 #else
6226     lm = (entry_2 >> 21) & 1;
6227 #endif
6228     flags = (seg_32bit << 0) | (contents << 1) |
6229         (read_exec_only << 3) | (limit_in_pages << 4) |
6230         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6231     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6232     base_addr = (entry_1 >> 16) |
6233         (entry_2 & 0xff000000) |
6234         ((entry_2 & 0xff) << 16);
6235     target_ldt_info->base_addr = tswapal(base_addr);
6236     target_ldt_info->limit = tswap32(limit);
6237     target_ldt_info->flags = tswap32(flags);
6238     unlock_user_struct(target_ldt_info, ptr, 1);
6239     return 0;
6240 }
6241 
6242 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6243 {
6244     return -TARGET_ENOSYS;
6245 }
6246 #else
6247 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6248 {
6249     abi_long ret = 0;
6250     abi_ulong val;
6251     int idx;
6252 
6253     switch(code) {
6254     case TARGET_ARCH_SET_GS:
6255     case TARGET_ARCH_SET_FS:
6256         if (code == TARGET_ARCH_SET_GS)
6257             idx = R_GS;
6258         else
6259             idx = R_FS;
6260         cpu_x86_load_seg(env, idx, 0);
6261         env->segs[idx].base = addr;
6262         break;
6263     case TARGET_ARCH_GET_GS:
6264     case TARGET_ARCH_GET_FS:
6265         if (code == TARGET_ARCH_GET_GS)
6266             idx = R_GS;
6267         else
6268             idx = R_FS;
6269         val = env->segs[idx].base;
6270         if (put_user(val, addr, abi_ulong))
6271             ret = -TARGET_EFAULT;
6272         break;
6273     default:
6274         ret = -TARGET_EINVAL;
6275         break;
6276     }
6277     return ret;
6278 }
6279 #endif /* defined(TARGET_ABI32) */
6280 #endif /* defined(TARGET_I386) */
6281 
6282 /*
6283  * These constants are generic.  Supply any that are missing from the host.
6284  */
6285 #ifndef PR_SET_NAME
6286 # define PR_SET_NAME    15
6287 # define PR_GET_NAME    16
6288 #endif
6289 #ifndef PR_SET_FP_MODE
6290 # define PR_SET_FP_MODE 45
6291 # define PR_GET_FP_MODE 46
6292 # define PR_FP_MODE_FR   (1 << 0)
6293 # define PR_FP_MODE_FRE  (1 << 1)
6294 #endif
6295 #ifndef PR_SVE_SET_VL
6296 # define PR_SVE_SET_VL  50
6297 # define PR_SVE_GET_VL  51
6298 # define PR_SVE_VL_LEN_MASK  0xffff
6299 # define PR_SVE_VL_INHERIT   (1 << 17)
6300 #endif
6301 #ifndef PR_PAC_RESET_KEYS
6302 # define PR_PAC_RESET_KEYS  54
6303 # define PR_PAC_APIAKEY   (1 << 0)
6304 # define PR_PAC_APIBKEY   (1 << 1)
6305 # define PR_PAC_APDAKEY   (1 << 2)
6306 # define PR_PAC_APDBKEY   (1 << 3)
6307 # define PR_PAC_APGAKEY   (1 << 4)
6308 #endif
6309 #ifndef PR_SET_TAGGED_ADDR_CTRL
6310 # define PR_SET_TAGGED_ADDR_CTRL 55
6311 # define PR_GET_TAGGED_ADDR_CTRL 56
6312 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6313 #endif
6314 #ifndef PR_SET_IO_FLUSHER
6315 # define PR_SET_IO_FLUSHER 57
6316 # define PR_GET_IO_FLUSHER 58
6317 #endif
6318 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6319 # define PR_SET_SYSCALL_USER_DISPATCH 59
6320 #endif
6321 #ifndef PR_SME_SET_VL
6322 # define PR_SME_SET_VL  63
6323 # define PR_SME_GET_VL  64
6324 # define PR_SME_VL_LEN_MASK  0xffff
6325 # define PR_SME_VL_INHERIT   (1 << 17)
6326 #endif
6327 
6328 #include "target_prctl.h"
6329 
6330 static abi_long do_prctl_inval0(CPUArchState *env)
6331 {
6332     return -TARGET_EINVAL;
6333 }
6334 
6335 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6336 {
6337     return -TARGET_EINVAL;
6338 }
6339 
6340 #ifndef do_prctl_get_fp_mode
6341 #define do_prctl_get_fp_mode do_prctl_inval0
6342 #endif
6343 #ifndef do_prctl_set_fp_mode
6344 #define do_prctl_set_fp_mode do_prctl_inval1
6345 #endif
6346 #ifndef do_prctl_sve_get_vl
6347 #define do_prctl_sve_get_vl do_prctl_inval0
6348 #endif
6349 #ifndef do_prctl_sve_set_vl
6350 #define do_prctl_sve_set_vl do_prctl_inval1
6351 #endif
6352 #ifndef do_prctl_reset_keys
6353 #define do_prctl_reset_keys do_prctl_inval1
6354 #endif
6355 #ifndef do_prctl_set_tagged_addr_ctrl
6356 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6357 #endif
6358 #ifndef do_prctl_get_tagged_addr_ctrl
6359 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6360 #endif
6361 #ifndef do_prctl_get_unalign
6362 #define do_prctl_get_unalign do_prctl_inval1
6363 #endif
6364 #ifndef do_prctl_set_unalign
6365 #define do_prctl_set_unalign do_prctl_inval1
6366 #endif
6367 #ifndef do_prctl_sme_get_vl
6368 #define do_prctl_sme_get_vl do_prctl_inval0
6369 #endif
6370 #ifndef do_prctl_sme_set_vl
6371 #define do_prctl_sme_set_vl do_prctl_inval1
6372 #endif
6373 
6374 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6375                          abi_long arg3, abi_long arg4, abi_long arg5)
6376 {
6377     abi_long ret;
6378 
6379     switch (option) {
6380     case PR_GET_PDEATHSIG:
6381         {
6382             int deathsig;
6383             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6384                                   arg3, arg4, arg5));
6385             if (!is_error(ret) &&
6386                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6387                 return -TARGET_EFAULT;
6388             }
6389             return ret;
6390         }
6391     case PR_SET_PDEATHSIG:
6392         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6393                                arg3, arg4, arg5));
6394     case PR_GET_NAME:
6395         {
6396             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6397             if (!name) {
6398                 return -TARGET_EFAULT;
6399             }
6400             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6401                                   arg3, arg4, arg5));
6402             unlock_user(name, arg2, 16);
6403             return ret;
6404         }
6405     case PR_SET_NAME:
6406         {
6407             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6408             if (!name) {
6409                 return -TARGET_EFAULT;
6410             }
6411             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6412                                   arg3, arg4, arg5));
6413             unlock_user(name, arg2, 0);
6414             return ret;
6415         }
6416     case PR_GET_FP_MODE:
6417         return do_prctl_get_fp_mode(env);
6418     case PR_SET_FP_MODE:
6419         return do_prctl_set_fp_mode(env, arg2);
6420     case PR_SVE_GET_VL:
6421         return do_prctl_sve_get_vl(env);
6422     case PR_SVE_SET_VL:
6423         return do_prctl_sve_set_vl(env, arg2);
6424     case PR_SME_GET_VL:
6425         return do_prctl_sme_get_vl(env);
6426     case PR_SME_SET_VL:
6427         return do_prctl_sme_set_vl(env, arg2);
6428     case PR_PAC_RESET_KEYS:
6429         if (arg3 || arg4 || arg5) {
6430             return -TARGET_EINVAL;
6431         }
6432         return do_prctl_reset_keys(env, arg2);
6433     case PR_SET_TAGGED_ADDR_CTRL:
6434         if (arg3 || arg4 || arg5) {
6435             return -TARGET_EINVAL;
6436         }
6437         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6438     case PR_GET_TAGGED_ADDR_CTRL:
6439         if (arg2 || arg3 || arg4 || arg5) {
6440             return -TARGET_EINVAL;
6441         }
6442         return do_prctl_get_tagged_addr_ctrl(env);
6443 
6444     case PR_GET_UNALIGN:
6445         return do_prctl_get_unalign(env, arg2);
6446     case PR_SET_UNALIGN:
6447         return do_prctl_set_unalign(env, arg2);
6448 
6449     case PR_CAP_AMBIENT:
6450     case PR_CAPBSET_READ:
6451     case PR_CAPBSET_DROP:
6452     case PR_GET_DUMPABLE:
6453     case PR_SET_DUMPABLE:
6454     case PR_GET_KEEPCAPS:
6455     case PR_SET_KEEPCAPS:
6456     case PR_GET_SECUREBITS:
6457     case PR_SET_SECUREBITS:
6458     case PR_GET_TIMING:
6459     case PR_SET_TIMING:
6460     case PR_GET_TIMERSLACK:
6461     case PR_SET_TIMERSLACK:
6462     case PR_MCE_KILL:
6463     case PR_MCE_KILL_GET:
6464     case PR_GET_NO_NEW_PRIVS:
6465     case PR_SET_NO_NEW_PRIVS:
6466     case PR_GET_IO_FLUSHER:
6467     case PR_SET_IO_FLUSHER:
6468     case PR_SET_CHILD_SUBREAPER:
6469     case PR_GET_SPECULATION_CTRL:
6470     case PR_SET_SPECULATION_CTRL:
6471         /* Some prctl options have no pointer arguments and we can pass on. */
6472         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6473 
6474     case PR_GET_CHILD_SUBREAPER:
6475         {
6476             int val;
6477             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6478                                   arg3, arg4, arg5));
6479             if (!is_error(ret) && put_user_s32(val, arg2)) {
6480                 return -TARGET_EFAULT;
6481             }
6482             return ret;
6483         }
6484 
6485     case PR_GET_TID_ADDRESS:
6486         {
6487             TaskState *ts = get_task_state(env_cpu(env));
6488             return put_user_ual(ts->child_tidptr, arg2);
6489         }
6490 
6491     case PR_GET_FPEXC:
6492     case PR_SET_FPEXC:
6493         /* Was used for SPE on PowerPC. */
6494         return -TARGET_EINVAL;
6495 
6496     case PR_GET_ENDIAN:
6497     case PR_SET_ENDIAN:
6498     case PR_GET_FPEMU:
6499     case PR_SET_FPEMU:
6500     case PR_SET_MM:
6501     case PR_GET_SECCOMP:
6502     case PR_SET_SECCOMP:
6503     case PR_SET_SYSCALL_USER_DISPATCH:
6504     case PR_GET_THP_DISABLE:
6505     case PR_SET_THP_DISABLE:
6506     case PR_GET_TSC:
6507     case PR_SET_TSC:
6508         /* Disable to prevent the target disabling stuff we need. */
6509         return -TARGET_EINVAL;
6510 
6511     default:
6512         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6513                       option);
6514         return -TARGET_EINVAL;
6515     }
6516 }
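/*
 * Note on PR_GET_NAME/PR_SET_NAME above: exactly 16 bytes of guest memory are
 * locked because the kernel's task comm field is 16 bytes including the
 * trailing NUL, so that is the size prctl() reads or writes through the
 * pointer argument.
 */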
6517 
6518 #define NEW_STACK_SIZE 0x40000
6519 
6520 
6521 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6522 typedef struct {
6523     CPUArchState *env;
6524     pthread_mutex_t mutex;
6525     pthread_cond_t cond;
6526     pthread_t thread;
6527     uint32_t tid;
6528     abi_ulong child_tidptr;
6529     abi_ulong parent_tidptr;
6530     sigset_t sigmask;
6531 } new_thread_info;
6532 
6533 static void *clone_func(void *arg)
6534 {
6535     new_thread_info *info = arg;
6536     CPUArchState *env;
6537     CPUState *cpu;
6538     TaskState *ts;
6539 
6540     rcu_register_thread();
6541     tcg_register_thread();
6542     env = info->env;
6543     cpu = env_cpu(env);
6544     thread_cpu = cpu;
6545     ts = get_task_state(cpu);
6546     info->tid = sys_gettid();
6547     task_settid(ts);
6548     if (info->child_tidptr)
6549         put_user_u32(info->tid, info->child_tidptr);
6550     if (info->parent_tidptr)
6551         put_user_u32(info->tid, info->parent_tidptr);
6552     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6553     /* Enable signals.  */
6554     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6555     /* Signal to the parent that we're ready.  */
6556     pthread_mutex_lock(&info->mutex);
6557     pthread_cond_broadcast(&info->cond);
6558     pthread_mutex_unlock(&info->mutex);
6559     /* Wait until the parent has finished initializing the tls state.  */
6560     pthread_mutex_lock(&clone_lock);
6561     pthread_mutex_unlock(&clone_lock);
6562     cpu_loop(env);
6563     /* never exits */
6564     return NULL;
6565 }
6566 
6567 /* do_fork() must return host values and target errnos (unlike most
6568    do_*() functions). */
6569 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6570                    abi_ulong parent_tidptr, target_ulong newtls,
6571                    abi_ulong child_tidptr)
6572 {
6573     CPUState *cpu = env_cpu(env);
6574     int ret;
6575     TaskState *ts;
6576     CPUState *new_cpu;
6577     CPUArchState *new_env;
6578     sigset_t sigmask;
6579 
6580     flags &= ~CLONE_IGNORED_FLAGS;
6581 
6582     /* Emulate vfork() with fork() */
6583     if (flags & CLONE_VFORK)
6584         flags &= ~(CLONE_VFORK | CLONE_VM);
6585 
6586     if (flags & CLONE_VM) {
6587         TaskState *parent_ts = get_task_state(cpu);
6588         new_thread_info info;
6589         pthread_attr_t attr;
6590 
6591         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6592             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6593             return -TARGET_EINVAL;
6594         }
6595 
6596         ts = g_new0(TaskState, 1);
6597         init_task_state(ts);
6598 
6599         /* Grab a mutex so that thread setup appears atomic.  */
6600         pthread_mutex_lock(&clone_lock);
6601 
6602         /*
6603          * If this is our first additional thread, we need to ensure we
6604          * generate code for parallel execution and flush old translations.
6605          * Do this now so that the copy gets CF_PARALLEL too.
6606          */
6607         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6608             tcg_cflags_set(cpu, CF_PARALLEL);
6609             tb_flush(cpu);
6610         }
6611 
6612         /* we create a new CPU instance. */
6613         new_env = cpu_copy(env);
6614         /* Init regs that differ from the parent.  */
6615         cpu_clone_regs_child(new_env, newsp, flags);
6616         cpu_clone_regs_parent(env, flags);
6617         new_cpu = env_cpu(new_env);
6618         new_cpu->opaque = ts;
6619         ts->bprm = parent_ts->bprm;
6620         ts->info = parent_ts->info;
6621         ts->signal_mask = parent_ts->signal_mask;
6622 
6623         if (flags & CLONE_CHILD_CLEARTID) {
6624             ts->child_tidptr = child_tidptr;
6625         }
6626 
6627         if (flags & CLONE_SETTLS) {
6628             cpu_set_tls (new_env, newtls);
6629         }
6630 
6631         memset(&info, 0, sizeof(info));
6632         pthread_mutex_init(&info.mutex, NULL);
6633         pthread_mutex_lock(&info.mutex);
6634         pthread_cond_init(&info.cond, NULL);
6635         info.env = new_env;
6636         if (flags & CLONE_CHILD_SETTID) {
6637             info.child_tidptr = child_tidptr;
6638         }
6639         if (flags & CLONE_PARENT_SETTID) {
6640             info.parent_tidptr = parent_tidptr;
6641         }
6642 
6643         ret = pthread_attr_init(&attr);
6644         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6645         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6646         /* It is not safe to deliver signals until the child has finished
6647            initializing, so temporarily block all signals.  */
6648         sigfillset(&sigmask);
6649         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6650         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6651 
6652         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6653         /* TODO: Free new CPU state if thread creation failed.  */
6654 
6655         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6656         pthread_attr_destroy(&attr);
6657         if (ret == 0) {
6658             /* Wait for the child to initialize.  */
6659             pthread_cond_wait(&info.cond, &info.mutex);
6660             ret = info.tid;
6661         } else {
6662             ret = -1;
6663         }
6664         pthread_mutex_unlock(&info.mutex);
6665         pthread_cond_destroy(&info.cond);
6666         pthread_mutex_destroy(&info.mutex);
6667         pthread_mutex_unlock(&clone_lock);
6668     } else {
6669         /* if no CLONE_VM, we consider it is a fork */
6670         if (flags & CLONE_INVALID_FORK_FLAGS) {
6671             return -TARGET_EINVAL;
6672         }
6673 
6674         /* We can't support custom termination signals */
6675         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6676             return -TARGET_EINVAL;
6677         }
6678 
6679 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6680         if (flags & CLONE_PIDFD) {
6681             return -TARGET_EINVAL;
6682         }
6683 #endif
6684 
6685         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6686         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6687             return -TARGET_EINVAL;
6688         }
6689 
6690         if (block_signals()) {
6691             return -QEMU_ERESTARTSYS;
6692         }
6693 
6694         fork_start();
6695         ret = fork();
6696         if (ret == 0) {
6697             /* Child Process.  */
6698             cpu_clone_regs_child(env, newsp, flags);
6699             fork_end(ret);
6700             /* There is a race condition here.  The parent process could
6701                theoretically read the TID in the child process before the child
6702                tid is set.  This would require using either ptrace
6703                (not implemented) or having *_tidptr to point at a shared memory
6704                mapping.  We can't repeat the spinlock hack used above because
6705                the child process gets its own copy of the lock.  */
6706             if (flags & CLONE_CHILD_SETTID)
6707                 put_user_u32(sys_gettid(), child_tidptr);
6708             if (flags & CLONE_PARENT_SETTID)
6709                 put_user_u32(sys_gettid(), parent_tidptr);
6710             ts = get_task_state(cpu);
6711             if (flags & CLONE_SETTLS)
6712                 cpu_set_tls (env, newtls);
6713             if (flags & CLONE_CHILD_CLEARTID)
6714                 ts->child_tidptr = child_tidptr;
6715         } else {
6716             cpu_clone_regs_parent(env, flags);
6717             if (flags & CLONE_PIDFD) {
6718                 int pid_fd = 0;
6719 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6720                 int pid_child = ret;
6721                 pid_fd = pidfd_open(pid_child, 0);
6722                 if (pid_fd >= 0) {
6723                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6724                                                | FD_CLOEXEC);
6725                 } else {
6726                         pid_fd = 0;
6727                 }
6728 #endif
6729                 put_user_u32(pid_fd, parent_tidptr);
6730             }
6731             fork_end(ret);
6732         }
6733         g_assert(!cpu_in_exclusive_context(cpu));
6734     }
6735     return ret;
6736 }
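/*
 * In short: a clone with CLONE_VM is emulated by spawning a host pthread in
 * the same QEMU process (running a copied CPUArchState), while a clone
 * without CLONE_VM falls through to a real host fork(); vfork() is folded
 * into the fork path by stripping CLONE_VFORK | CLONE_VM above.
 */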
6737 
6738 /* warning : doesn't handle linux specific flags... */
6739 static int target_to_host_fcntl_cmd(int cmd)
6740 {
6741     int ret;
6742 
6743     switch(cmd) {
6744     case TARGET_F_DUPFD:
6745     case TARGET_F_GETFD:
6746     case TARGET_F_SETFD:
6747     case TARGET_F_GETFL:
6748     case TARGET_F_SETFL:
6749     case TARGET_F_OFD_GETLK:
6750     case TARGET_F_OFD_SETLK:
6751     case TARGET_F_OFD_SETLKW:
6752         ret = cmd;
6753         break;
6754     case TARGET_F_GETLK:
6755         ret = F_GETLK;
6756         break;
6757     case TARGET_F_SETLK:
6758         ret = F_SETLK;
6759         break;
6760     case TARGET_F_SETLKW:
6761         ret = F_SETLKW;
6762         break;
6763     case TARGET_F_GETOWN:
6764         ret = F_GETOWN;
6765         break;
6766     case TARGET_F_SETOWN:
6767         ret = F_SETOWN;
6768         break;
6769     case TARGET_F_GETSIG:
6770         ret = F_GETSIG;
6771         break;
6772     case TARGET_F_SETSIG:
6773         ret = F_SETSIG;
6774         break;
6775 #if TARGET_ABI_BITS == 32
6776     case TARGET_F_GETLK64:
6777         ret = F_GETLK;
6778         break;
6779     case TARGET_F_SETLK64:
6780         ret = F_SETLK;
6781         break;
6782     case TARGET_F_SETLKW64:
6783         ret = F_SETLKW;
6784         break;
6785 #endif
6786     case TARGET_F_SETLEASE:
6787         ret = F_SETLEASE;
6788         break;
6789     case TARGET_F_GETLEASE:
6790         ret = F_GETLEASE;
6791         break;
6792 #ifdef F_DUPFD_CLOEXEC
6793     case TARGET_F_DUPFD_CLOEXEC:
6794         ret = F_DUPFD_CLOEXEC;
6795         break;
6796 #endif
6797     case TARGET_F_NOTIFY:
6798         ret = F_NOTIFY;
6799         break;
6800 #ifdef F_GETOWN_EX
6801     case TARGET_F_GETOWN_EX:
6802         ret = F_GETOWN_EX;
6803         break;
6804 #endif
6805 #ifdef F_SETOWN_EX
6806     case TARGET_F_SETOWN_EX:
6807         ret = F_SETOWN_EX;
6808         break;
6809 #endif
6810 #ifdef F_SETPIPE_SZ
6811     case TARGET_F_SETPIPE_SZ:
6812         ret = F_SETPIPE_SZ;
6813         break;
6814     case TARGET_F_GETPIPE_SZ:
6815         ret = F_GETPIPE_SZ;
6816         break;
6817 #endif
6818 #ifdef F_ADD_SEALS
6819     case TARGET_F_ADD_SEALS:
6820         ret = F_ADD_SEALS;
6821         break;
6822     case TARGET_F_GET_SEALS:
6823         ret = F_GET_SEALS;
6824         break;
6825 #endif
6826     default:
6827         ret = -TARGET_EINVAL;
6828         break;
6829     }
6830 
6831 #if defined(__powerpc64__)
6832     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which are
6833      * not supported by the kernel. The glibc fcntl call actually adjusts
6834      * them to 5, 6 and 7 before making the syscall(). Since we make the
6835      * syscall directly, adjust to what is supported by the kernel.
6836      */
6837     if (ret >= F_GETLK && ret <= F_SETLKW) {
6838         ret -= F_GETLK - 5;
6839     }
6840 #endif
6841 
6842     return ret;
6843 }
6844 
6845 #define FLOCK_TRANSTBL \
6846     switch (type) { \
6847     TRANSTBL_CONVERT(F_RDLCK); \
6848     TRANSTBL_CONVERT(F_WRLCK); \
6849     TRANSTBL_CONVERT(F_UNLCK); \
6850     }
6851 
6852 static int target_to_host_flock(int type)
6853 {
6854 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6855     FLOCK_TRANSTBL
6856 #undef  TRANSTBL_CONVERT
6857     return -TARGET_EINVAL;
6858 }
6859 
6860 static int host_to_target_flock(int type)
6861 {
6862 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6863     FLOCK_TRANSTBL
6864 #undef  TRANSTBL_CONVERT
6865     /* if we don't know how to convert the value coming
6866      * from the host, we copy it to the target field as-is
6867      */
6868     return type;
6869 }
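/*
 * For reference, with the first TRANSTBL_CONVERT definition the
 * FLOCK_TRANSTBL macro expands target_to_host_flock() to roughly:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 */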
6870 
6871 static inline abi_long copy_from_user_flock(struct flock *fl,
6872                                             abi_ulong target_flock_addr)
6873 {
6874     struct target_flock *target_fl;
6875     int l_type;
6876 
6877     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6878         return -TARGET_EFAULT;
6879     }
6880 
6881     __get_user(l_type, &target_fl->l_type);
6882     l_type = target_to_host_flock(l_type);
6883     if (l_type < 0) {
6884         return l_type;
6885     }
6886     fl->l_type = l_type;
6887     __get_user(fl->l_whence, &target_fl->l_whence);
6888     __get_user(fl->l_start, &target_fl->l_start);
6889     __get_user(fl->l_len, &target_fl->l_len);
6890     __get_user(fl->l_pid, &target_fl->l_pid);
6891     unlock_user_struct(target_fl, target_flock_addr, 0);
6892     return 0;
6893 }
6894 
6895 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6896                                           const struct flock *fl)
6897 {
6898     struct target_flock *target_fl;
6899     short l_type;
6900 
6901     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6902         return -TARGET_EFAULT;
6903     }
6904 
6905     l_type = host_to_target_flock(fl->l_type);
6906     __put_user(l_type, &target_fl->l_type);
6907     __put_user(fl->l_whence, &target_fl->l_whence);
6908     __put_user(fl->l_start, &target_fl->l_start);
6909     __put_user(fl->l_len, &target_fl->l_len);
6910     __put_user(fl->l_pid, &target_fl->l_pid);
6911     unlock_user_struct(target_fl, target_flock_addr, 1);
6912     return 0;
6913 }
6914 
6915 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6916 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6917 
6918 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6919 struct target_oabi_flock64 {
6920     abi_short l_type;
6921     abi_short l_whence;
6922     abi_llong l_start;
6923     abi_llong l_len;
6924     abi_int   l_pid;
6925 } QEMU_PACKED;
6926 
6927 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6928                                                    abi_ulong target_flock_addr)
6929 {
6930     struct target_oabi_flock64 *target_fl;
6931     int l_type;
6932 
6933     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6934         return -TARGET_EFAULT;
6935     }
6936 
6937     __get_user(l_type, &target_fl->l_type);
6938     l_type = target_to_host_flock(l_type);
6939     if (l_type < 0) {
6940         return l_type;
6941     }
6942     fl->l_type = l_type;
6943     __get_user(fl->l_whence, &target_fl->l_whence);
6944     __get_user(fl->l_start, &target_fl->l_start);
6945     __get_user(fl->l_len, &target_fl->l_len);
6946     __get_user(fl->l_pid, &target_fl->l_pid);
6947     unlock_user_struct(target_fl, target_flock_addr, 0);
6948     return 0;
6949 }
6950 
6951 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6952                                                  const struct flock *fl)
6953 {
6954     struct target_oabi_flock64 *target_fl;
6955     short l_type;
6956 
6957     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6958         return -TARGET_EFAULT;
6959     }
6960 
6961     l_type = host_to_target_flock(fl->l_type);
6962     __put_user(l_type, &target_fl->l_type);
6963     __put_user(fl->l_whence, &target_fl->l_whence);
6964     __put_user(fl->l_start, &target_fl->l_start);
6965     __put_user(fl->l_len, &target_fl->l_len);
6966     __put_user(fl->l_pid, &target_fl->l_pid);
6967     unlock_user_struct(target_fl, target_flock_addr, 1);
6968     return 0;
6969 }
6970 #endif
6971 
6972 static inline abi_long copy_from_user_flock64(struct flock *fl,
6973                                               abi_ulong target_flock_addr)
6974 {
6975     struct target_flock64 *target_fl;
6976     int l_type;
6977 
6978     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6979         return -TARGET_EFAULT;
6980     }
6981 
6982     __get_user(l_type, &target_fl->l_type);
6983     l_type = target_to_host_flock(l_type);
6984     if (l_type < 0) {
6985         return l_type;
6986     }
6987     fl->l_type = l_type;
6988     __get_user(fl->l_whence, &target_fl->l_whence);
6989     __get_user(fl->l_start, &target_fl->l_start);
6990     __get_user(fl->l_len, &target_fl->l_len);
6991     __get_user(fl->l_pid, &target_fl->l_pid);
6992     unlock_user_struct(target_fl, target_flock_addr, 0);
6993     return 0;
6994 }
6995 
6996 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6997                                             const struct flock *fl)
6998 {
6999     struct target_flock64 *target_fl;
7000     short l_type;
7001 
7002     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7003         return -TARGET_EFAULT;
7004     }
7005 
7006     l_type = host_to_target_flock(fl->l_type);
7007     __put_user(l_type, &target_fl->l_type);
7008     __put_user(fl->l_whence, &target_fl->l_whence);
7009     __put_user(fl->l_start, &target_fl->l_start);
7010     __put_user(fl->l_len, &target_fl->l_len);
7011     __put_user(fl->l_pid, &target_fl->l_pid);
7012     unlock_user_struct(target_fl, target_flock_addr, 1);
7013     return 0;
7014 }
7015 
7016 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7017 {
7018     struct flock fl;
7019 #ifdef F_GETOWN_EX
7020     struct f_owner_ex fox;
7021     struct target_f_owner_ex *target_fox;
7022 #endif
7023     abi_long ret;
7024     int host_cmd = target_to_host_fcntl_cmd(cmd);
7025 
7026     if (host_cmd == -TARGET_EINVAL)
7027 	    return host_cmd;
7028 
7029     switch(cmd) {
7030     case TARGET_F_GETLK:
7031         ret = copy_from_user_flock(&fl, arg);
7032         if (ret) {
7033             return ret;
7034         }
7035         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7036         if (ret == 0) {
7037             ret = copy_to_user_flock(arg, &fl);
7038         }
7039         break;
7040 
7041     case TARGET_F_SETLK:
7042     case TARGET_F_SETLKW:
7043         ret = copy_from_user_flock(&fl, arg);
7044         if (ret) {
7045             return ret;
7046         }
7047         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7048         break;
7049 
7050     case TARGET_F_GETLK64:
7051     case TARGET_F_OFD_GETLK:
7052         ret = copy_from_user_flock64(&fl, arg);
7053         if (ret) {
7054             return ret;
7055         }
7056         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7057         if (ret == 0) {
7058             ret = copy_to_user_flock64(arg, &fl);
7059         }
7060         break;
7061     case TARGET_F_SETLK64:
7062     case TARGET_F_SETLKW64:
7063     case TARGET_F_OFD_SETLK:
7064     case TARGET_F_OFD_SETLKW:
7065         ret = copy_from_user_flock64(&fl, arg);
7066         if (ret) {
7067             return ret;
7068         }
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7070         break;
7071 
7072     case TARGET_F_GETFL:
7073         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7074         if (ret >= 0) {
7075             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7076             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7077             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7078                 ret |= TARGET_O_LARGEFILE;
7079             }
7080         }
7081         break;
7082 
7083     case TARGET_F_SETFL:
7084         ret = get_errno(safe_fcntl(fd, host_cmd,
7085                                    target_to_host_bitmask(arg,
7086                                                           fcntl_flags_tbl)));
7087         break;
7088 
7089 #ifdef F_GETOWN_EX
7090     case TARGET_F_GETOWN_EX:
7091         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7092         if (ret >= 0) {
7093             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7094                 return -TARGET_EFAULT;
7095             target_fox->type = tswap32(fox.type);
7096             target_fox->pid = tswap32(fox.pid);
7097             unlock_user_struct(target_fox, arg, 1);
7098         }
7099         break;
7100 #endif
7101 
7102 #ifdef F_SETOWN_EX
7103     case TARGET_F_SETOWN_EX:
7104         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7105             return -TARGET_EFAULT;
7106         fox.type = tswap32(target_fox->type);
7107         fox.pid = tswap32(target_fox->pid);
7108         unlock_user_struct(target_fox, arg, 0);
7109         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7110         break;
7111 #endif
7112 
7113     case TARGET_F_SETSIG:
7114         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7115         break;
7116 
7117     case TARGET_F_GETSIG:
7118         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7119         break;
7120 
7121     case TARGET_F_SETOWN:
7122     case TARGET_F_GETOWN:
7123     case TARGET_F_SETLEASE:
7124     case TARGET_F_GETLEASE:
7125     case TARGET_F_SETPIPE_SZ:
7126     case TARGET_F_GETPIPE_SZ:
7127     case TARGET_F_ADD_SEALS:
7128     case TARGET_F_GET_SEALS:
7129         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7130         break;
7131 
7132     default:
7133         ret = get_errno(safe_fcntl(fd, cmd, arg));
7134         break;
7135     }
7136     return ret;
7137 }
7138 
7139 #ifdef USE_UID16
7140 
7141 static inline int high2lowuid(int uid)
7142 {
7143     if (uid > 65535)
7144         return 65534;
7145     else
7146         return uid;
7147 }
7148 
7149 static inline int high2lowgid(int gid)
7150 {
7151     if (gid > 65535)
7152         return 65534;
7153     else
7154         return gid;
7155 }
7156 
7157 static inline int low2highuid(int uid)
7158 {
7159     if ((int16_t)uid == -1)
7160         return -1;
7161     else
7162         return uid;
7163 }
7164 
7165 static inline int low2highgid(int gid)
7166 {
7167     if ((int16_t)gid == -1)
7168         return -1;
7169     else
7170         return gid;
7171 }
7172 static inline int tswapid(int id)
7173 {
7174     return tswap16(id);
7175 }
7176 
7177 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7178 
7179 #else /* !USE_UID16 */
7180 static inline int high2lowuid(int uid)
7181 {
7182     return uid;
7183 }
7184 static inline int high2lowgid(int gid)
7185 {
7186     return gid;
7187 }
7188 static inline int low2highuid(int uid)
7189 {
7190     return uid;
7191 }
7192 static inline int low2highgid(int gid)
7193 {
7194     return gid;
7195 }
7196 static inline int tswapid(int id)
7197 {
7198     return tswap32(id);
7199 }
7200 
7201 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7202 
7203 #endif /* USE_UID16 */
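/*
 * Example of the UID16 handling above: on a USE_UID16 target a host uid of
 * 70000 does not fit in 16 bits, so high2lowuid() reports it to the guest as
 * 65534 (matching the kernel's usual overflowuid default), while the special
 * value -1 ("leave unchanged") is preserved by low2highuid()/low2highgid().
 */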
7204 
7205 /* We must do direct syscalls for setting UID/GID, because we want to
7206  * implement the Linux system call semantics of "change only for this thread",
7207  * not the libc/POSIX semantics of "change for all threads in process".
7208  * (See http://ewontfix.com/17/ for more details.)
7209  * We use the 32-bit version of the syscalls if present; if it is not
7210  * then either the host architecture supports 32-bit UIDs natively with
7211  * the standard syscall, or the 16-bit UID is the best we can do.
7212  */
7213 #ifdef __NR_setuid32
7214 #define __NR_sys_setuid __NR_setuid32
7215 #else
7216 #define __NR_sys_setuid __NR_setuid
7217 #endif
7218 #ifdef __NR_setgid32
7219 #define __NR_sys_setgid __NR_setgid32
7220 #else
7221 #define __NR_sys_setgid __NR_setgid
7222 #endif
7223 #ifdef __NR_setresuid32
7224 #define __NR_sys_setresuid __NR_setresuid32
7225 #else
7226 #define __NR_sys_setresuid __NR_setresuid
7227 #endif
7228 #ifdef __NR_setresgid32
7229 #define __NR_sys_setresgid __NR_setresgid32
7230 #else
7231 #define __NR_sys_setresgid __NR_setresgid
7232 #endif
7233 #ifdef __NR_setgroups32
7234 #define __NR_sys_setgroups __NR_setgroups32
7235 #else
7236 #define __NR_sys_setgroups __NR_setgroups
7237 #endif
7238 #ifdef __NR_sys_setreuid32
7239 #define __NR_sys_setreuid __NR_setreuid32
7240 #else
7241 #define __NR_sys_setreuid __NR_setreuid
7242 #endif
7243 #ifdef __NR_sys_setregid32
7244 #define __NR_sys_setregid __NR_setregid32
7245 #else
7246 #define __NR_sys_setregid __NR_setregid
7247 #endif
7248 
7249 _syscall1(int, sys_setuid, uid_t, uid)
7250 _syscall1(int, sys_setgid, gid_t, gid)
7251 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7252 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7253 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7254 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7255 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7256 
7257 void syscall_init(void)
7258 {
7259     IOCTLEntry *ie;
7260     const argtype *arg_type;
7261     int size;
7262 
7263     thunk_init(STRUCT_MAX);
7264 
7265 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7266 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7267 #include "syscall_types.h"
7268 #undef STRUCT
7269 #undef STRUCT_SPECIAL
7270 
7271     /* we patch the ioctl size if necessary. We rely on the fact that
7272        no ioctl has all the bits at '1' in the size field */
7273     ie = ioctl_entries;
7274     while (ie->target_cmd != 0) {
7275         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7276             TARGET_IOC_SIZEMASK) {
7277             arg_type = ie->arg_type;
7278             if (arg_type[0] != TYPE_PTR) {
7279                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7280                         ie->target_cmd);
7281                 exit(1);
7282             }
7283             arg_type++;
7284             size = thunk_type_size(arg_type, 0);
7285             ie->target_cmd = (ie->target_cmd &
7286                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7287                 (size << TARGET_IOC_SIZESHIFT);
7288         }
7289 
7290         /* automatic consistency check if same arch */
7291 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7292     (defined(__x86_64__) && defined(TARGET_X86_64))
7293         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7294             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7295                     ie->name, ie->target_cmd, ie->host_cmd);
7296         }
7297 #endif
7298         ie++;
7299     }
7300 }
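/*
 * The patching above relies on the ioctl command encoding: any table entry
 * declared with the all-ones size placeholder (TARGET_IOC_SIZEMASK) has its
 * size field replaced at startup with the real size of the thunk-described
 * argument structure, so later ioctl dispatch can match the command number
 * exactly as the guest issues it.
 */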
7301 
7302 #ifdef TARGET_NR_truncate64
7303 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7304                                          abi_long arg2,
7305                                          abi_long arg3,
7306                                          abi_long arg4)
7307 {
7308     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7309         arg2 = arg3;
7310         arg3 = arg4;
7311     }
7312     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315 
7316 #ifdef TARGET_NR_ftruncate64
7317 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7318                                           abi_long arg2,
7319                                           abi_long arg3,
7320                                           abi_long arg4)
7321 {
7322     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7323         arg2 = arg3;
7324         arg3 = arg4;
7325     }
7326     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7327 }
7328 #endif
7329 
7330 #if defined(TARGET_NR_timer_settime) || \
7331     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7332 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7333                                                  abi_ulong target_addr)
7334 {
7335     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7336                                 offsetof(struct target_itimerspec,
7337                                          it_interval)) ||
7338         target_to_host_timespec(&host_its->it_value, target_addr +
7339                                 offsetof(struct target_itimerspec,
7340                                          it_value))) {
7341         return -TARGET_EFAULT;
7342     }
7343 
7344     return 0;
7345 }
7346 #endif
7347 
7348 #if defined(TARGET_NR_timer_settime64) || \
7349     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7350 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7351                                                    abi_ulong target_addr)
7352 {
7353     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7354                                   offsetof(struct target__kernel_itimerspec,
7355                                            it_interval)) ||
7356         target_to_host_timespec64(&host_its->it_value, target_addr +
7357                                   offsetof(struct target__kernel_itimerspec,
7358                                            it_value))) {
7359         return -TARGET_EFAULT;
7360     }
7361 
7362     return 0;
7363 }
7364 #endif
7365 
7366 #if ((defined(TARGET_NR_timerfd_gettime) || \
7367       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7368       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7369 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7370                                                  struct itimerspec *host_its)
7371 {
7372     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7373                                                        it_interval),
7374                                 &host_its->it_interval) ||
7375         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7376                                                        it_value),
7377                                 &host_its->it_value)) {
7378         return -TARGET_EFAULT;
7379     }
7380     return 0;
7381 }
7382 #endif
7383 
7384 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7385       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7386       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7387 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7388                                                    struct itimerspec *host_its)
7389 {
7390     if (host_to_target_timespec64(target_addr +
7391                                   offsetof(struct target__kernel_itimerspec,
7392                                            it_interval),
7393                                   &host_its->it_interval) ||
7394         host_to_target_timespec64(target_addr +
7395                                   offsetof(struct target__kernel_itimerspec,
7396                                            it_value),
7397                                   &host_its->it_value)) {
7398         return -TARGET_EFAULT;
7399     }
7400     return 0;
7401 }
7402 #endif
7403 
7404 #if defined(TARGET_NR_adjtimex) || \
7405     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7406 static inline abi_long target_to_host_timex(struct timex *host_tx,
7407                                             abi_long target_addr)
7408 {
7409     struct target_timex *target_tx;
7410 
7411     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7412         return -TARGET_EFAULT;
7413     }
7414 
7415     __get_user(host_tx->modes, &target_tx->modes);
7416     __get_user(host_tx->offset, &target_tx->offset);
7417     __get_user(host_tx->freq, &target_tx->freq);
7418     __get_user(host_tx->maxerror, &target_tx->maxerror);
7419     __get_user(host_tx->esterror, &target_tx->esterror);
7420     __get_user(host_tx->status, &target_tx->status);
7421     __get_user(host_tx->constant, &target_tx->constant);
7422     __get_user(host_tx->precision, &target_tx->precision);
7423     __get_user(host_tx->tolerance, &target_tx->tolerance);
7424     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7425     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7426     __get_user(host_tx->tick, &target_tx->tick);
7427     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7428     __get_user(host_tx->jitter, &target_tx->jitter);
7429     __get_user(host_tx->shift, &target_tx->shift);
7430     __get_user(host_tx->stabil, &target_tx->stabil);
7431     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7432     __get_user(host_tx->calcnt, &target_tx->calcnt);
7433     __get_user(host_tx->errcnt, &target_tx->errcnt);
7434     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7435     __get_user(host_tx->tai, &target_tx->tai);
7436 
7437     unlock_user_struct(target_tx, target_addr, 0);
7438     return 0;
7439 }
7440 
7441 static inline abi_long host_to_target_timex(abi_long target_addr,
7442                                             struct timex *host_tx)
7443 {
7444     struct target_timex *target_tx;
7445 
7446     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     __put_user(host_tx->modes, &target_tx->modes);
7451     __put_user(host_tx->offset, &target_tx->offset);
7452     __put_user(host_tx->freq, &target_tx->freq);
7453     __put_user(host_tx->maxerror, &target_tx->maxerror);
7454     __put_user(host_tx->esterror, &target_tx->esterror);
7455     __put_user(host_tx->status, &target_tx->status);
7456     __put_user(host_tx->constant, &target_tx->constant);
7457     __put_user(host_tx->precision, &target_tx->precision);
7458     __put_user(host_tx->tolerance, &target_tx->tolerance);
7459     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7460     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7461     __put_user(host_tx->tick, &target_tx->tick);
7462     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7463     __put_user(host_tx->jitter, &target_tx->jitter);
7464     __put_user(host_tx->shift, &target_tx->shift);
7465     __put_user(host_tx->stabil, &target_tx->stabil);
7466     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7467     __put_user(host_tx->calcnt, &target_tx->calcnt);
7468     __put_user(host_tx->errcnt, &target_tx->errcnt);
7469     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7470     __put_user(host_tx->tai, &target_tx->tai);
7471 
7472     unlock_user_struct(target_tx, target_addr, 1);
7473     return 0;
7474 }
7475 #endif
7476 
7477 
7478 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7479 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7480                                               abi_long target_addr)
7481 {
7482     struct target__kernel_timex *target_tx;
7483 
7484     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7485                                  offsetof(struct target__kernel_timex,
7486                                           time))) {
7487         return -TARGET_EFAULT;
7488     }
7489 
7490     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7491         return -TARGET_EFAULT;
7492     }
7493 
7494     __get_user(host_tx->modes, &target_tx->modes);
7495     __get_user(host_tx->offset, &target_tx->offset);
7496     __get_user(host_tx->freq, &target_tx->freq);
7497     __get_user(host_tx->maxerror, &target_tx->maxerror);
7498     __get_user(host_tx->esterror, &target_tx->esterror);
7499     __get_user(host_tx->status, &target_tx->status);
7500     __get_user(host_tx->constant, &target_tx->constant);
7501     __get_user(host_tx->precision, &target_tx->precision);
7502     __get_user(host_tx->tolerance, &target_tx->tolerance);
7503     __get_user(host_tx->tick, &target_tx->tick);
7504     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7505     __get_user(host_tx->jitter, &target_tx->jitter);
7506     __get_user(host_tx->shift, &target_tx->shift);
7507     __get_user(host_tx->stabil, &target_tx->stabil);
7508     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7509     __get_user(host_tx->calcnt, &target_tx->calcnt);
7510     __get_user(host_tx->errcnt, &target_tx->errcnt);
7511     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7512     __get_user(host_tx->tai, &target_tx->tai);
7513 
7514     unlock_user_struct(target_tx, target_addr, 0);
7515     return 0;
7516 }
7517 
7518 static inline abi_long host_to_target_timex64(abi_long target_addr,
7519                                               struct timex *host_tx)
7520 {
7521     struct target__kernel_timex *target_tx;
7522 
7523     if (copy_to_user_timeval64(target_addr +
7524                                offsetof(struct target__kernel_timex, time),
7525                                &host_tx->time)) {
7526         return -TARGET_EFAULT;
7527     }
7528 
7529     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7530         return -TARGET_EFAULT;
7531     }
7532 
7533     __put_user(host_tx->modes, &target_tx->modes);
7534     __put_user(host_tx->offset, &target_tx->offset);
7535     __put_user(host_tx->freq, &target_tx->freq);
7536     __put_user(host_tx->maxerror, &target_tx->maxerror);
7537     __put_user(host_tx->esterror, &target_tx->esterror);
7538     __put_user(host_tx->status, &target_tx->status);
7539     __put_user(host_tx->constant, &target_tx->constant);
7540     __put_user(host_tx->precision, &target_tx->precision);
7541     __put_user(host_tx->tolerance, &target_tx->tolerance);
7542     __put_user(host_tx->tick, &target_tx->tick);
7543     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7544     __put_user(host_tx->jitter, &target_tx->jitter);
7545     __put_user(host_tx->shift, &target_tx->shift);
7546     __put_user(host_tx->stabil, &target_tx->stabil);
7547     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7548     __put_user(host_tx->calcnt, &target_tx->calcnt);
7549     __put_user(host_tx->errcnt, &target_tx->errcnt);
7550     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7551     __put_user(host_tx->tai, &target_tx->tai);
7552 
7553     unlock_user_struct(target_tx, target_addr, 1);
7554     return 0;
7555 }
7556 #endif
7557 
7558 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7559 #define sigev_notify_thread_id _sigev_un._tid
7560 #endif
7561 
7562 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7563                                                abi_ulong target_addr)
7564 {
7565     struct target_sigevent *target_sevp;
7566 
7567     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7568         return -TARGET_EFAULT;
7569     }
7570 
7571     /* This union is awkward on 64 bit systems because it has a 32 bit
7572      * integer and a pointer in it; we follow the conversion approach
7573      * used for handling sigval types in signal.c so the guest should get
7574      * the correct value back even if we did a 64 bit byteswap and it's
7575      * using the 32 bit integer.
7576      */
7577     host_sevp->sigev_value.sival_ptr =
7578         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7579     host_sevp->sigev_signo =
7580         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7581     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7582     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7583 
7584     unlock_user_struct(target_sevp, target_addr, 1);
7585     return 0;
7586 }
7587 
7588 #if defined(TARGET_NR_mlockall)
7589 static inline int target_to_host_mlockall_arg(int arg)
7590 {
7591     int result = 0;
7592 
7593     if (arg & TARGET_MCL_CURRENT) {
7594         result |= MCL_CURRENT;
7595     }
7596     if (arg & TARGET_MCL_FUTURE) {
7597         result |= MCL_FUTURE;
7598     }
7599 #ifdef MCL_ONFAULT
7600     if (arg & TARGET_MCL_ONFAULT) {
7601         result |= MCL_ONFAULT;
7602     }
7603 #endif
7604 
7605     return result;
7606 }
7607 #endif
7608 
7609 static inline int target_to_host_msync_arg(abi_long arg)
7610 {
7611     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7612            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7613            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7614            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7615 }
7616 
7617 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7618      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7619      defined(TARGET_NR_newfstatat))
7620 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7621                                              abi_ulong target_addr,
7622                                              struct stat *host_st)
7623 {
7624 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7625     if (cpu_env->eabi) {
7626         struct target_eabi_stat64 *target_st;
7627 
7628         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7629             return -TARGET_EFAULT;
7630         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7631         __put_user(host_st->st_dev, &target_st->st_dev);
7632         __put_user(host_st->st_ino, &target_st->st_ino);
7633 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7634         __put_user(host_st->st_ino, &target_st->__st_ino);
7635 #endif
7636         __put_user(host_st->st_mode, &target_st->st_mode);
7637         __put_user(host_st->st_nlink, &target_st->st_nlink);
7638         __put_user(host_st->st_uid, &target_st->st_uid);
7639         __put_user(host_st->st_gid, &target_st->st_gid);
7640         __put_user(host_st->st_rdev, &target_st->st_rdev);
7641         __put_user(host_st->st_size, &target_st->st_size);
7642         __put_user(host_st->st_blksize, &target_st->st_blksize);
7643         __put_user(host_st->st_blocks, &target_st->st_blocks);
7644         __put_user(host_st->st_atime, &target_st->target_st_atime);
7645         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7646         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7647 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7648         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7649         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7650         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7651 #endif
7652         unlock_user_struct(target_st, target_addr, 1);
7653     } else
7654 #endif
7655     {
7656 #if defined(TARGET_HAS_STRUCT_STAT64)
7657         struct target_stat64 *target_st;
7658 #else
7659         struct target_stat *target_st;
7660 #endif
7661 
7662         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7663             return -TARGET_EFAULT;
7664         memset(target_st, 0, sizeof(*target_st));
7665         __put_user(host_st->st_dev, &target_st->st_dev);
7666         __put_user(host_st->st_ino, &target_st->st_ino);
7667 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7668         __put_user(host_st->st_ino, &target_st->__st_ino);
7669 #endif
7670         __put_user(host_st->st_mode, &target_st->st_mode);
7671         __put_user(host_st->st_nlink, &target_st->st_nlink);
7672         __put_user(host_st->st_uid, &target_st->st_uid);
7673         __put_user(host_st->st_gid, &target_st->st_gid);
7674         __put_user(host_st->st_rdev, &target_st->st_rdev);
7675         /* XXX: better use of kernel struct */
7676         __put_user(host_st->st_size, &target_st->st_size);
7677         __put_user(host_st->st_blksize, &target_st->st_blksize);
7678         __put_user(host_st->st_blocks, &target_st->st_blocks);
7679         __put_user(host_st->st_atime, &target_st->target_st_atime);
7680         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7681         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7682 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7683         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7684         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7685         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7686 #endif
7687         unlock_user_struct(target_st, target_addr, 1);
7688     }
7689 
7690     return 0;
7691 }
7692 #endif
7693 
7694 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7695 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7696                                             abi_ulong target_addr)
7697 {
7698     struct target_statx *target_stx;
7699 
7700     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7701         return -TARGET_EFAULT;
7702     }
7703     memset(target_stx, 0, sizeof(*target_stx));
7704 
7705     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7706     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7707     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7708     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7709     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7710     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7711     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7712     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7713     __put_user(host_stx->stx_size, &target_stx->stx_size);
7714     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7715     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7716     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7717     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7718     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7719     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7720     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7721     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7722     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7723     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7724     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7725     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7726     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7727     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7728 
7729     unlock_user_struct(target_stx, target_addr, 1);
7730 
7731     return 0;
7732 }
7733 #endif
7734 
7735 static int do_sys_futex(int *uaddr, int op, int val,
7736                          const struct timespec *timeout, int *uaddr2,
7737                          int val3)
7738 {
7739 #if HOST_LONG_BITS == 64
7740 #if defined(__NR_futex)
7741     /* 64-bit hosts always have a 64-bit time_t; no _time64 variant exists. */
7742     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7743 
7744 #endif
7745 #else /* HOST_LONG_BITS == 64 */
7746 #if defined(__NR_futex_time64)
7747     if (sizeof(timeout->tv_sec) == 8) {
7748         /* _time64 function on 32bit arch */
7749         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7750     }
7751 #endif
7752 #if defined(__NR_futex)
7753     /* old function on 32bit arch */
7754     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7755 #endif
7756 #endif /* HOST_LONG_BITS == 64 */
7757     g_assert_not_reached();
7758 }
7759 
7760 static int do_safe_futex(int *uaddr, int op, int val,
7761                          const struct timespec *timeout, int *uaddr2,
7762                          int val3)
7763 {
7764 #if HOST_LONG_BITS == 64
7765 #if defined(__NR_futex)
7766     /* 64-bit hosts always have a 64-bit time_t; no _time64 variant exists. */
7767     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7768 #endif
7769 #else /* HOST_LONG_BITS == 64 */
7770 #if defined(__NR_futex_time64)
7771     if (sizeof(timeout->tv_sec) == 8) {
7772         /* _time64 function on 32bit arch */
7773         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7774                                            val3));
7775     }
7776 #endif
7777 #if defined(__NR_futex)
7778     /* old function on 32bit arch */
7779     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7780 #endif
7781 #endif /* HOST_LONG_BITS == 64 */
7782     return -TARGET_ENOSYS;
7783 }
7784 
7785 /* ??? Using host futex calls even when target atomic operations
7786    are not really atomic probably breaks things.  However, implementing
7787    futexes locally would make futexes shared between multiple processes
7788    tricky.  On the other hand, local futexes are probably useless anyway,
7789    because guest atomic operations won't work either.  */
7790 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7791 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7792                     int op, int val, target_ulong timeout,
7793                     target_ulong uaddr2, int val3)
7794 {
7795     struct timespec ts, *pts = NULL;
7796     void *haddr2 = NULL;
7797     int base_op;
7798 
7799     /* We assume FUTEX_* constants are the same on both host and target. */
7800 #ifdef FUTEX_CMD_MASK
7801     base_op = op & FUTEX_CMD_MASK;
7802 #else
7803     base_op = op;
7804 #endif
7805     switch (base_op) {
7806     case FUTEX_WAIT:
7807     case FUTEX_WAIT_BITSET:
7808         val = tswap32(val);
7809         break;
7810     case FUTEX_WAIT_REQUEUE_PI:
7811         val = tswap32(val);
7812         haddr2 = g2h(cpu, uaddr2);
7813         break;
7814     case FUTEX_LOCK_PI:
7815     case FUTEX_LOCK_PI2:
7816         break;
7817     case FUTEX_WAKE:
7818     case FUTEX_WAKE_BITSET:
7819     case FUTEX_TRYLOCK_PI:
7820     case FUTEX_UNLOCK_PI:
7821         timeout = 0;
7822         break;
7823     case FUTEX_FD:
7824         val = target_to_host_signal(val);
7825         timeout = 0;
7826         break;
7827     case FUTEX_CMP_REQUEUE:
7828     case FUTEX_CMP_REQUEUE_PI:
7829         val3 = tswap32(val3);
7830         /* fall through */
7831     case FUTEX_REQUEUE:
7832     case FUTEX_WAKE_OP:
7833         /*
7834          * For these, the 4th argument is not TIMEOUT, but VAL2.
7835          * But the prototype of do_safe_futex takes a pointer, so
7836          * insert casts to satisfy the compiler.  We do not need
7837          * to tswap VAL2 since it's not compared to guest memory.
7838          */
7839         pts = (struct timespec *)(uintptr_t)timeout;
7840         timeout = 0;
7841         haddr2 = g2h(cpu, uaddr2);
7842         break;
7843     default:
7844         return -TARGET_ENOSYS;
7845     }
7846     if (timeout) {
7847         pts = &ts;
7848         if (time64
7849             ? target_to_host_timespec64(pts, timeout)
7850             : target_to_host_timespec(pts, timeout)) {
7851             return -TARGET_EFAULT;
7852         }
7853     }
7854     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7855 }
7856 #endif
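
/*
 * Illustrative example (hypothetical guest call, not taken from the code
 * above): futex(uaddr, FUTEX_WAKE_OP, 1, 5, uaddr2, op) arrives with the
 * count 5 in the timeout slot; do_futex() forwards it untranslated as
 * (struct timespec *)(uintptr_t)5, which is what the host kernel expects
 * for the requeue/wake-op commands, whose 4th argument is VAL2.
 */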
7857 
7858 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7859 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7860                                      abi_long handle, abi_long mount_id,
7861                                      abi_long flags)
7862 {
7863     struct file_handle *target_fh;
7864     struct file_handle *fh;
7865     int mid = 0;
7866     abi_long ret;
7867     char *name;
7868     unsigned int size, total_size;
7869 
7870     if (get_user_s32(size, handle)) {
7871         return -TARGET_EFAULT;
7872     }
7873 
7874     name = lock_user_string(pathname);
7875     if (!name) {
7876         return -TARGET_EFAULT;
7877     }
7878 
7879     total_size = sizeof(struct file_handle) + size;
7880     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7881     if (!target_fh) {
7882         unlock_user(name, pathname, 0);
7883         return -TARGET_EFAULT;
7884     }
7885 
7886     fh = g_malloc0(total_size);
7887     fh->handle_bytes = size;
7888 
7889     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7890     unlock_user(name, pathname, 0);
7891 
7892     /* man name_to_handle_at(2):
7893      * Other than the use of the handle_bytes field, the caller should treat
7894      * the file_handle structure as an opaque data type
7895      */
7896 
7897     memcpy(target_fh, fh, total_size);
7898     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7899     target_fh->handle_type = tswap32(fh->handle_type);
7900     g_free(fh);
7901     unlock_user(target_fh, handle, total_size);
7902 
7903     if (put_user_s32(mid, mount_id)) {
7904         return -TARGET_EFAULT;
7905     }
7906 
7907     return ret;
7908 
7909 }
7910 #endif
7911 
7912 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7913 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7914                                      abi_long flags)
7915 {
7916     struct file_handle *target_fh;
7917     struct file_handle *fh;
7918     unsigned int size, total_size;
7919     abi_long ret;
7920 
7921     if (get_user_s32(size, handle)) {
7922         return -TARGET_EFAULT;
7923     }
7924 
7925     total_size = sizeof(struct file_handle) + size;
7926     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7927     if (!target_fh) {
7928         return -TARGET_EFAULT;
7929     }
7930 
7931     fh = g_memdup(target_fh, total_size);
7932     fh->handle_bytes = size;
7933     fh->handle_type = tswap32(target_fh->handle_type);
7934 
7935     ret = get_errno(open_by_handle_at(mount_fd, fh,
7936                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7937 
7938     g_free(fh);
7939 
7940     unlock_user(target_fh, handle, total_size);
7941 
7942     return ret;
7943 }
7944 #endif
7945 
7946 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7947 
7948 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7949 {
7950     int host_flags;
7951     target_sigset_t *target_mask;
7952     sigset_t host_mask;
7953     abi_long ret;
7954 
7955     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7956         return -TARGET_EINVAL;
7957     }
7958     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7959         return -TARGET_EFAULT;
7960     }
7961 
7962     target_to_host_sigset(&host_mask, target_mask);
7963 
7964     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7965 
7966     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7967     if (ret >= 0) {
7968         fd_trans_register(ret, &target_signalfd_trans);
7969     }
7970 
7971     unlock_user_struct(target_mask, mask, 0);
7972 
7973     return ret;
7974 }
7975 #endif
7976 
7977 /* Map host to target signal numbers for the wait family of syscalls.
7978    Assume all other status bits are the same.  */
7979 int host_to_target_waitstatus(int status)
7980 {
7981     if (WIFSIGNALED(status)) {
7982         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7983     }
7984     if (WIFSTOPPED(status)) {
7985         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7986                | (status & 0xff);
7987     }
7988     return status;
7989 }
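
/*
 * Worked example (illustrative): a child stopped by SIGTSTP produces a host
 * status of (SIGTSTP << 8) | 0x7f.  Only the signal byte is remapped via
 * host_to_target_signal(); the 0x7f "stopped" marker and all other status
 * bits pass through unchanged, as the comment above assumes.
 */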
7990 
7991 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7992 {
7993     CPUState *cpu = env_cpu(cpu_env);
7994     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7995     int i;
7996 
7997     for (i = 0; i < bprm->argc; i++) {
7998         size_t len = strlen(bprm->argv[i]) + 1;
7999 
8000         if (write(fd, bprm->argv[i], len) != len) {
8001             return -1;
8002         }
8003     }
8004 
8005     return 0;
8006 }
8007 
8008 struct open_self_maps_data {
8009     TaskState *ts;
8010     IntervalTreeRoot *host_maps;
8011     int fd;
8012     bool smaps;
8013 };
8014 
8015 /*
8016  * Subroutine to output one line of /proc/self/maps,
8017  * or one region of /proc/self/smaps.
8018  */
8019 
8020 #ifdef TARGET_HPPA
8021 # define test_stack(S, E, L)  (E == L)
8022 #else
8023 # define test_stack(S, E, L)  (S == L)
8024 #endif
8025 
8026 static void open_self_maps_4(const struct open_self_maps_data *d,
8027                              const MapInfo *mi, abi_ptr start,
8028                              abi_ptr end, unsigned flags)
8029 {
8030     const struct image_info *info = d->ts->info;
8031     const char *path = mi->path;
8032     uint64_t offset;
8033     int fd = d->fd;
8034     int count;
8035 
8036     if (test_stack(start, end, info->stack_limit)) {
8037         path = "[stack]";
8038     } else if (start == info->brk) {
8039         path = "[heap]";
8040     } else if (start == info->vdso) {
8041         path = "[vdso]";
8042 #ifdef TARGET_X86_64
8043     } else if (start == TARGET_VSYSCALL_PAGE) {
8044         path = "[vsyscall]";
8045 #endif
8046     }
8047 
8048     /* Except for the null device (MAP_ANON), adjust offset for this fragment. */
8049     offset = mi->offset;
8050     if (mi->dev) {
8051         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8052         offset += hstart - mi->itree.start;
8053     }
8054 
8055     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8056                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8057                     start, end,
8058                     (flags & PAGE_READ) ? 'r' : '-',
8059                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8060                     (flags & PAGE_EXEC) ? 'x' : '-',
8061                     mi->is_priv ? 'p' : 's',
8062                     offset, major(mi->dev), minor(mi->dev),
8063                     (uint64_t)mi->inode);
8064     if (path) {
8065         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8066     } else {
8067         dprintf(fd, "\n");
8068     }
8069 
8070     if (d->smaps) {
8071         unsigned long size = end - start;
8072         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8073         unsigned long size_kb = size >> 10;
8074 
8075         dprintf(fd, "Size:                  %lu kB\n"
8076                 "KernelPageSize:        %lu kB\n"
8077                 "MMUPageSize:           %lu kB\n"
8078                 "Rss:                   0 kB\n"
8079                 "Pss:                   0 kB\n"
8080                 "Pss_Dirty:             0 kB\n"
8081                 "Shared_Clean:          0 kB\n"
8082                 "Shared_Dirty:          0 kB\n"
8083                 "Private_Clean:         0 kB\n"
8084                 "Private_Dirty:         0 kB\n"
8085                 "Referenced:            0 kB\n"
8086                 "Anonymous:             %lu kB\n"
8087                 "LazyFree:              0 kB\n"
8088                 "AnonHugePages:         0 kB\n"
8089                 "ShmemPmdMapped:        0 kB\n"
8090                 "FilePmdMapped:         0 kB\n"
8091                 "Shared_Hugetlb:        0 kB\n"
8092                 "Private_Hugetlb:       0 kB\n"
8093                 "Swap:                  0 kB\n"
8094                 "SwapPss:               0 kB\n"
8095                 "Locked:                0 kB\n"
8096                 "THPeligible:    0\n"
8097                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8098                 size_kb, page_size_kb, page_size_kb,
8099                 (flags & PAGE_ANON ? size_kb : 0),
8100                 (flags & PAGE_READ) ? " rd" : "",
8101                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8102                 (flags & PAGE_EXEC) ? " ex" : "",
8103                 mi->is_priv ? "" : " sh",
8104                 (flags & PAGE_READ) ? " mr" : "",
8105                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8106                 (flags & PAGE_EXEC) ? " me" : "",
8107                 mi->is_priv ? "" : " ms");
8108     }
8109 }
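
/*
 * Illustrative output (hypothetical values): a line emitted above looks like
 *   00400000-0040b000 r-xp 00000000 08:01 1234567   /usr/bin/app
 * where the pathname is padded so that the preceding fields occupy 73
 * characters; when d->smaps is set, the zero-filled smaps counters follow.
 */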
8110 
8111 /*
8112  * Callback for walk_memory_regions, when read_self_maps() fails.
8113  * Proceed without the benefit of host /proc/self/maps cross-check.
8114  */
8115 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8116                             target_ulong guest_end, unsigned long flags)
8117 {
8118     static const MapInfo mi = { .is_priv = true };
8119 
8120     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8121     return 0;
8122 }
8123 
8124 /*
8125  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8126  */
8127 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8128                             target_ulong guest_end, unsigned long flags)
8129 {
8130     const struct open_self_maps_data *d = opaque;
8131     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8132     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8133 
8134 #ifdef TARGET_X86_64
8135     /*
8136      * Because of the extremely high position of the page within the guest
8137      * virtual address space, this is not backed by host memory at all.
8138      * Therefore the loop below would fail.  This is the only instance
8139      * of not having host backing memory.
8140      */
8141     if (guest_start == TARGET_VSYSCALL_PAGE) {
8142         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8143     }
8144 #endif
8145 
8146     while (1) {
8147         IntervalTreeNode *n =
8148             interval_tree_iter_first(d->host_maps, host_start, host_start);
8149         MapInfo *mi = container_of(n, MapInfo, itree);
8150         uintptr_t this_hlast = MIN(host_last, n->last);
8151         target_ulong this_gend = h2g(this_hlast) + 1;
8152 
8153         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8154 
8155         if (this_hlast == host_last) {
8156             return 0;
8157         }
8158         host_start = this_hlast + 1;
8159         guest_start = h2g(host_start);
8160     }
8161 }
8162 
8163 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8164 {
8165     struct open_self_maps_data d = {
8166         .ts = get_task_state(env_cpu(env)),
8167         .fd = fd,
8168         .smaps = smaps
8169     };
8170 
8171     mmap_lock();
8172     d.host_maps = read_self_maps();
8173     if (d.host_maps) {
8174         walk_memory_regions(&d, open_self_maps_2);
8175         free_self_maps(d.host_maps);
8176     } else {
8177         walk_memory_regions(&d, open_self_maps_3);
8178     }
8179     mmap_unlock();
8180     return 0;
8181 }
8182 
8183 static int open_self_maps(CPUArchState *cpu_env, int fd)
8184 {
8185     return open_self_maps_1(cpu_env, fd, false);
8186 }
8187 
8188 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8189 {
8190     return open_self_maps_1(cpu_env, fd, true);
8191 }
8192 
8193 static int open_self_stat(CPUArchState *cpu_env, int fd)
8194 {
8195     CPUState *cpu = env_cpu(cpu_env);
8196     TaskState *ts = get_task_state(cpu);
8197     g_autoptr(GString) buf = g_string_new(NULL);
8198     int i;
8199 
8200     for (i = 0; i < 44; i++) {
8201         if (i == 0) {
8202             /* pid */
8203             g_string_printf(buf, FMT_pid " ", getpid());
8204         } else if (i == 1) {
8205             /* app name */
8206             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8207             bin = bin ? bin + 1 : ts->bprm->argv[0];
8208             g_string_printf(buf, "(%.15s) ", bin);
8209         } else if (i == 2) {
8210             /* task state */
8211             g_string_assign(buf, "R "); /* we are running right now */
8212         } else if (i == 3) {
8213             /* ppid */
8214             g_string_printf(buf, FMT_pid " ", getppid());
8215         } else if (i == 19) {
8216             /* num_threads */
8217             int cpus = 0;
8218             WITH_RCU_READ_LOCK_GUARD() {
8219                 CPUState *cpu_iter;
8220                 CPU_FOREACH(cpu_iter) {
8221                     cpus++;
8222                 }
8223             }
8224             g_string_printf(buf, "%d ", cpus);
8225         } else if (i == 21) {
8226             /* starttime */
8227             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8228         } else if (i == 27) {
8229             /* stack bottom */
8230             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8231         } else {
8232             /* for the rest, there is MasterCard */
8233             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8234         }
8235 
8236         if (write(fd, buf->str, buf->len) != buf->len) {
8237             return -1;
8238         }
8239     }
8240 
8241     return 0;
8242 }
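
/*
 * Illustrative result (hypothetical pid and values): the loop above writes
 * 44 space-separated fields, e.g. "4242 (app) R 4100 0 0 ... 0\n".  Only
 * pid, comm, state, ppid, num_threads (field 19), starttime (field 21) and
 * the stack address (field 27) carry real values; every other field is 0.
 */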
8243 
8244 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8245 {
8246     CPUState *cpu = env_cpu(cpu_env);
8247     TaskState *ts = get_task_state(cpu);
8248     abi_ulong auxv = ts->info->saved_auxv;
8249     abi_ulong len = ts->info->auxv_len;
8250     char *ptr;
8251 
8252     /*
8253      * The auxiliary vector is stored on the target process stack.
8254      * Read in the whole auxv vector and copy it to the file.
8255      */
8256     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8257     if (ptr != NULL) {
8258         while (len > 0) {
8259             ssize_t r;
8260             r = write(fd, ptr, len);
8261             if (r <= 0) {
8262                 break;
8263             }
8264             len -= r;
8265             ptr += r;
8266         }
8267         lseek(fd, 0, SEEK_SET);
8268         unlock_user(ptr, auxv, len);
8269     }
8270 
8271     return 0;
8272 }
8273 
8274 static int is_proc_myself(const char *filename, const char *entry)
8275 {
8276     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8277         filename += strlen("/proc/");
8278         if (!strncmp(filename, "self/", strlen("self/"))) {
8279             filename += strlen("self/");
8280         } else if (*filename >= '1' && *filename <= '9') {
8281             char myself[80];
8282             snprintf(myself, sizeof(myself), "%d/", getpid());
8283             if (!strncmp(filename, myself, strlen(myself))) {
8284                 filename += strlen(myself);
8285             } else {
8286                 return 0;
8287             }
8288         } else {
8289             return 0;
8290         }
8291         if (!strcmp(filename, entry)) {
8292             return 1;
8293         }
8294     }
8295     return 0;
8296 }
8297 
8298 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8299                       const char *fmt, int code)
8300 {
8301     if (logfile) {
8302         CPUState *cs = env_cpu(env);
8303 
8304         fprintf(logfile, fmt, code);
8305         fprintf(logfile, "Failing executable: %s\n", exec_path);
8306         cpu_dump_state(cs, logfile, 0);
8307         open_self_maps(env, fileno(logfile));
8308     }
8309 }
8310 
8311 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8312 {
8313     /* dump to console */
8314     excp_dump_file(stderr, env, fmt, code);
8315 
8316     /* dump to log file */
8317     if (qemu_log_separate()) {
8318         FILE *logfile = qemu_log_trylock();
8319 
8320         excp_dump_file(logfile, env, fmt, code);
8321         qemu_log_unlock(logfile);
8322     }
8323 }
8324 
8325 #include "target_proc.h"
8326 
8327 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8328     defined(HAVE_ARCH_PROC_CPUINFO) || \
8329     defined(HAVE_ARCH_PROC_HARDWARE)
8330 static int is_proc(const char *filename, const char *entry)
8331 {
8332     return strcmp(filename, entry) == 0;
8333 }
8334 #endif
8335 
8336 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8337 static int open_net_route(CPUArchState *cpu_env, int fd)
8338 {
8339     FILE *fp;
8340     char *line = NULL;
8341     size_t len = 0;
8342     ssize_t read;
8343 
8344     fp = fopen("/proc/net/route", "r");
8345     if (fp == NULL) {
8346         return -1;
8347     }
8348 
8349     /* read header */
8350 
8351     read = getline(&line, &len, fp);
8352     dprintf(fd, "%s", line);
8353 
8354     /* read routes */
8355 
8356     while ((read = getline(&line, &len, fp)) != -1) {
8357         char iface[16];
8358         uint32_t dest, gw, mask;
8359         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8360         int fields;
8361 
8362         fields = sscanf(line,
8363                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8364                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8365                         &mask, &mtu, &window, &irtt);
8366         if (fields != 11) {
8367             continue;
8368         }
8369         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8370                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8371                 metric, tswap32(mask), mtu, window, irtt);
8372     }
8373 
8374     free(line);
8375     fclose(fp);
8376 
8377     return 0;
8378 }
8379 #endif
8380 
8381 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8382                               const char *fname, int flags, mode_t mode,
8383                               int openat2_resolve, bool safe)
8384 {
8385     g_autofree char *proc_name = NULL;
8386     const char *pathname;
8387     struct fake_open {
8388         const char *filename;
8389         int (*fill)(CPUArchState *cpu_env, int fd);
8390         int (*cmp)(const char *s1, const char *s2);
8391     };
8392     const struct fake_open *fake_open;
8393     static const struct fake_open fakes[] = {
8394         { "maps", open_self_maps, is_proc_myself },
8395         { "smaps", open_self_smaps, is_proc_myself },
8396         { "stat", open_self_stat, is_proc_myself },
8397         { "auxv", open_self_auxv, is_proc_myself },
8398         { "cmdline", open_self_cmdline, is_proc_myself },
8399 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8400         { "/proc/net/route", open_net_route, is_proc },
8401 #endif
8402 #if defined(HAVE_ARCH_PROC_CPUINFO)
8403         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8404 #endif
8405 #if defined(HAVE_ARCH_PROC_HARDWARE)
8406         { "/proc/hardware", open_hardware, is_proc },
8407 #endif
8408         { NULL, NULL, NULL }
8409     };
8410 
8411     /* if this is a file from the /proc/ filesystem, expand to its real path */
8412     proc_name = realpath(fname, NULL);
8413     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8414         pathname = proc_name;
8415     } else {
8416         pathname = fname;
8417     }
8418 
8419     if (is_proc_myself(pathname, "exe")) {
8420         /* Honor openat2 resolve flags */
8421         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8422             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8423             errno = ELOOP;
8424             return -1;
8425         }
8426         if (safe) {
8427             return safe_openat(dirfd, exec_path, flags, mode);
8428         } else {
8429             return openat(dirfd, exec_path, flags, mode);
8430         }
8431     }
8432 
8433     for (fake_open = fakes; fake_open->filename; fake_open++) {
8434         if (fake_open->cmp(pathname, fake_open->filename)) {
8435             break;
8436         }
8437     }
8438 
8439     if (fake_open->filename) {
8440         const char *tmpdir;
8441         char filename[PATH_MAX];
8442         int fd, r;
8443 
8444         fd = memfd_create("qemu-open", 0);
8445         if (fd < 0) {
8446             if (errno != ENOSYS) {
8447                 return fd;
8448             }
8449             /* create temporary file to map stat to */
8450             tmpdir = getenv("TMPDIR");
8451             if (!tmpdir)
8452                 tmpdir = "/tmp";
8453             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8454             fd = mkstemp(filename);
8455             if (fd < 0) {
8456                 return fd;
8457             }
8458             unlink(filename);
8459         }
8460 
8461         if ((r = fake_open->fill(cpu_env, fd))) {
8462             int e = errno;
8463             close(fd);
8464             errno = e;
8465             return r;
8466         }
8467         lseek(fd, 0, SEEK_SET);
8468 
8469         return fd;
8470     }
8471 
8472     return -2;
8473 }
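
/*
 * Usage sketch (illustrative): a guest open("/proc/self/maps", O_RDONLY)
 * never reaches the host /proc.  The fakes[] table routes it to
 * open_self_maps(), whose synthesized contents are written into an
 * anonymous memfd (or an unlinked temporary file when memfd_create() is
 * unavailable), and that host descriptor is handed back to the guest.
 */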
8474 
8475 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8476                     int flags, mode_t mode, bool safe)
8477 {
8478     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8479     if (fd > -2) {
8480         return fd;
8481     }
8482 
8483     if (safe) {
8484         return safe_openat(dirfd, path(pathname), flags, mode);
8485     } else {
8486         return openat(dirfd, path(pathname), flags, mode);
8487     }
8488 }
8489 
8490 
8491 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8492                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8493                       abi_ulong guest_size)
8494 {
8495     struct open_how_ver0 how = {0};
8496     char *pathname;
8497     int ret;
8498 
8499     if (guest_size < sizeof(struct target_open_how_ver0)) {
8500         return -TARGET_EINVAL;
8501     }
8502     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8503     if (ret) {
8504         if (ret == -TARGET_E2BIG) {
8505             qemu_log_mask(LOG_UNIMP,
8506                           "Unimplemented openat2 open_how size: "
8507                           TARGET_ABI_FMT_lu "\n", guest_size);
8508         }
8509         return ret;
8510     }
8511     pathname = lock_user_string(guest_pathname);
8512     if (!pathname) {
8513         return -TARGET_EFAULT;
8514     }
8515 
8516     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8517     how.mode = tswap64(how.mode);
8518     how.resolve = tswap64(how.resolve);
8519     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8520                                 how.resolve, true);
8521     if (fd > -2) {
8522         ret = get_errno(fd);
8523     } else {
8524         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8525                                      sizeof(struct open_how_ver0)));
8526     }
8527 
8528     fd_trans_unregister(ret);
8529     unlock_user(pathname, guest_pathname, 0);
8530     return ret;
8531 }
8532 
8533 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8534 {
8535     ssize_t ret;
8536 
8537     if (!pathname || !buf) {
8538         errno = EFAULT;
8539         return -1;
8540     }
8541 
8542     if (!bufsiz) {
8543         /* Short circuit this for the magic exe check. */
8544         errno = EINVAL;
8545         return -1;
8546     }
8547 
8548     if (is_proc_myself((const char *)pathname, "exe")) {
8549         /*
8550          * Don't worry about sign mismatch as earlier mapping
8551          * logic would have thrown a bad address error.
8552          */
8553         ret = MIN(strlen(exec_path), bufsiz);
8554         /* We cannot NUL terminate the string. */
8555         memcpy(buf, exec_path, ret);
8556     } else {
8557         ret = readlink(path(pathname), buf, bufsiz);
8558     }
8559 
8560     return ret;
8561 }
8562 
8563 static int do_execv(CPUArchState *cpu_env, int dirfd,
8564                     abi_long pathname, abi_long guest_argp,
8565                     abi_long guest_envp, int flags, bool is_execveat)
8566 {
8567     int ret;
8568     char **argp, **envp;
8569     int argc, envc;
8570     abi_ulong gp;
8571     abi_ulong addr;
8572     char **q;
8573     void *p;
8574 
8575     argc = 0;
8576 
8577     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8578         if (get_user_ual(addr, gp)) {
8579             return -TARGET_EFAULT;
8580         }
8581         if (!addr) {
8582             break;
8583         }
8584         argc++;
8585     }
8586     envc = 0;
8587     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8588         if (get_user_ual(addr, gp)) {
8589             return -TARGET_EFAULT;
8590         }
8591         if (!addr) {
8592             break;
8593         }
8594         envc++;
8595     }
8596 
8597     argp = g_new0(char *, argc + 1);
8598     envp = g_new0(char *, envc + 1);
8599 
8600     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8601         if (get_user_ual(addr, gp)) {
8602             goto execve_efault;
8603         }
8604         if (!addr) {
8605             break;
8606         }
8607         *q = lock_user_string(addr);
8608         if (!*q) {
8609             goto execve_efault;
8610         }
8611     }
8612     *q = NULL;
8613 
8614     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8615         if (get_user_ual(addr, gp)) {
8616             goto execve_efault;
8617         }
8618         if (!addr) {
8619             break;
8620         }
8621         *q = lock_user_string(addr);
8622         if (!*q) {
8623             goto execve_efault;
8624         }
8625     }
8626     *q = NULL;
8627 
8628     /*
8629      * Although execve() is not an interruptible syscall it is
8630      * a special case where we must use the safe_syscall wrapper:
8631      * if we allow a signal to happen before we make the host
8632      * syscall then we will 'lose' it, because at the point of
8633      * execve the process leaves QEMU's control. So we use the
8634      * safe syscall wrapper to ensure that we either take the
8635      * signal as a guest signal, or else it does not happen
8636      * before the execve completes and makes it the other
8637      * program's problem.
8638      */
8639     p = lock_user_string(pathname);
8640     if (!p) {
8641         goto execve_efault;
8642     }
8643 
8644     const char *exe = p;
8645     if (is_proc_myself(p, "exe")) {
8646         exe = exec_path;
8647     }
8648     ret = is_execveat
8649         ? safe_execveat(dirfd, exe, argp, envp, flags)
8650         : safe_execve(exe, argp, envp);
8651     ret = get_errno(ret);
8652 
8653     unlock_user(p, pathname, 0);
8654 
8655     goto execve_end;
8656 
8657 execve_efault:
8658     ret = -TARGET_EFAULT;
8659 
8660 execve_end:
8661     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8662         if (get_user_ual(addr, gp) || !addr) {
8663             break;
8664         }
8665         unlock_user(*q, addr, 0);
8666     }
8667     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8668         if (get_user_ual(addr, gp) || !addr) {
8669             break;
8670         }
8671         unlock_user(*q, addr, 0);
8672     }
8673 
8674     g_free(argp);
8675     g_free(envp);
8676     return ret;
8677 }
8678 
8679 #define TIMER_MAGIC 0x0caf0000
8680 #define TIMER_MAGIC_MASK 0xffff0000
8681 
8682 /* Convert QEMU provided timer ID back to internal 16bit index format */
8683 static target_timer_t get_timer_id(abi_long arg)
8684 {
8685     target_timer_t timerid = arg;
8686 
8687     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8688         return -TARGET_EINVAL;
8689     }
8690 
8691     timerid &= 0xffff;
8692 
8693     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8694         return -TARGET_EINVAL;
8695     }
8696 
8697     return timerid;
8698 }
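
/*
 * Worked example (illustrative): a guest-visible timer ID of 0x0caf0002
 * passes the TIMER_MAGIC check and selects g_posix_timers[2], while an ID
 * such as 0x12340002 (wrong magic) or an in-range magic with an index of
 * ARRAY_SIZE(g_posix_timers) or more is rejected with -TARGET_EINVAL.
 */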
8699 
8700 static int target_to_host_cpu_mask(unsigned long *host_mask,
8701                                    size_t host_size,
8702                                    abi_ulong target_addr,
8703                                    size_t target_size)
8704 {
8705     unsigned target_bits = sizeof(abi_ulong) * 8;
8706     unsigned host_bits = sizeof(*host_mask) * 8;
8707     abi_ulong *target_mask;
8708     unsigned i, j;
8709 
8710     assert(host_size >= target_size);
8711 
8712     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8713     if (!target_mask) {
8714         return -TARGET_EFAULT;
8715     }
8716     memset(host_mask, 0, host_size);
8717 
8718     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8719         unsigned bit = i * target_bits;
8720         abi_ulong val;
8721 
8722         __get_user(val, &target_mask[i]);
8723         for (j = 0; j < target_bits; j++, bit++) {
8724             if (val & (1UL << j)) {
8725                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8726             }
8727         }
8728     }
8729 
8730     unlock_user(target_mask, target_addr, 0);
8731     return 0;
8732 }
8733 
8734 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8735                                    size_t host_size,
8736                                    abi_ulong target_addr,
8737                                    size_t target_size)
8738 {
8739     unsigned target_bits = sizeof(abi_ulong) * 8;
8740     unsigned host_bits = sizeof(*host_mask) * 8;
8741     abi_ulong *target_mask;
8742     unsigned i, j;
8743 
8744     assert(host_size >= target_size);
8745 
8746     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8747     if (!target_mask) {
8748         return -TARGET_EFAULT;
8749     }
8750 
8751     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8752         unsigned bit = i * target_bits;
8753         abi_ulong val = 0;
8754 
8755         for (j = 0; j < target_bits; j++, bit++) {
8756             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8757                 val |= 1UL << j;
8758             }
8759         }
8760         __put_user(val, &target_mask[i]);
8761     }
8762 
8763     unlock_user(target_mask, target_addr, target_size);
8764     return 0;
8765 }
8766 
8767 #ifdef TARGET_NR_getdents
8768 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8769 {
8770     g_autofree void *hdirp = NULL;
8771     void *tdirp;
8772     int hlen, hoff, toff;
8773     int hreclen, treclen;
8774     off_t prev_diroff = 0;
8775 
8776     hdirp = g_try_malloc(count);
8777     if (!hdirp) {
8778         return -TARGET_ENOMEM;
8779     }
8780 
8781 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8782     hlen = sys_getdents(dirfd, hdirp, count);
8783 #else
8784     hlen = sys_getdents64(dirfd, hdirp, count);
8785 #endif
8786 
8787     hlen = get_errno(hlen);
8788     if (is_error(hlen)) {
8789         return hlen;
8790     }
8791 
8792     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8793     if (!tdirp) {
8794         return -TARGET_EFAULT;
8795     }
8796 
8797     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8798 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8799         struct linux_dirent *hde = hdirp + hoff;
8800 #else
8801         struct linux_dirent64 *hde = hdirp + hoff;
8802 #endif
8803         struct target_dirent *tde = tdirp + toff;
8804         int namelen;
8805         uint8_t type;
8806 
8807         namelen = strlen(hde->d_name);
8808         hreclen = hde->d_reclen;
8809         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8810         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8811 
8812         if (toff + treclen > count) {
8813             /*
8814              * If the host struct is smaller than the target struct, or
8815              * requires less alignment and thus packs into less space,
8816              * then the host can return more entries than we can pass
8817              * on to the guest.
8818              */
8819             if (toff == 0) {
8820                 toff = -TARGET_EINVAL; /* result buffer is too small */
8821                 break;
8822             }
8823             /*
8824              * Return what we have, resetting the file pointer to the
8825              * location of the first record not returned.
8826              */
8827             lseek(dirfd, prev_diroff, SEEK_SET);
8828             break;
8829         }
8830 
8831         prev_diroff = hde->d_off;
8832         tde->d_ino = tswapal(hde->d_ino);
8833         tde->d_off = tswapal(hde->d_off);
8834         tde->d_reclen = tswap16(treclen);
8835         memcpy(tde->d_name, hde->d_name, namelen + 1);
8836 
8837         /*
8838          * The getdents type is in what was formerly a padding byte at the
8839          * end of the structure.
8840          */
8841 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8842         type = *((uint8_t *)hde + hreclen - 1);
8843 #else
8844         type = hde->d_type;
8845 #endif
8846         *((uint8_t *)tde + treclen - 1) = type;
8847     }
8848 
8849     unlock_user(tdirp, arg2, toff);
8850     return toff;
8851 }
8852 #endif /* TARGET_NR_getdents */
8853 
8854 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8855 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8856 {
8857     g_autofree void *hdirp = NULL;
8858     void *tdirp;
8859     int hlen, hoff, toff;
8860     int hreclen, treclen;
8861     off_t prev_diroff = 0;
8862 
8863     hdirp = g_try_malloc(count);
8864     if (!hdirp) {
8865         return -TARGET_ENOMEM;
8866     }
8867 
8868     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8869     if (is_error(hlen)) {
8870         return hlen;
8871     }
8872 
8873     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8874     if (!tdirp) {
8875         return -TARGET_EFAULT;
8876     }
8877 
8878     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8879         struct linux_dirent64 *hde = hdirp + hoff;
8880         struct target_dirent64 *tde = tdirp + toff;
8881         int namelen;
8882 
8883         namelen = strlen(hde->d_name) + 1;
8884         hreclen = hde->d_reclen;
8885         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8886         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8887 
8888         if (toff + treclen > count) {
8889             /*
8890              * If the host struct is smaller than the target struct, or
8891              * requires less alignment and thus packs into less space,
8892              * then the host can return more entries than we can pass
8893              * on to the guest.
8894              */
8895             if (toff == 0) {
8896                 toff = -TARGET_EINVAL; /* result buffer is too small */
8897                 break;
8898             }
8899             /*
8900              * Return what we have, resetting the file pointer to the
8901              * location of the first record not returned.
8902              */
8903             lseek(dirfd, prev_diroff, SEEK_SET);
8904             break;
8905         }
8906 
8907         prev_diroff = hde->d_off;
8908         tde->d_ino = tswap64(hde->d_ino);
8909         tde->d_off = tswap64(hde->d_off);
8910         tde->d_reclen = tswap16(treclen);
8911         tde->d_type = hde->d_type;
8912         memcpy(tde->d_name, hde->d_name, namelen);
8913     }
8914 
8915     unlock_user(tdirp, arg2, toff);
8916     return toff;
8917 }
8918 #endif /* TARGET_NR_getdents64 */
8919 
8920 #if defined(TARGET_NR_riscv_hwprobe)
8921 
8922 #define RISCV_HWPROBE_KEY_MVENDORID     0
8923 #define RISCV_HWPROBE_KEY_MARCHID       1
8924 #define RISCV_HWPROBE_KEY_MIMPID        2
8925 
8926 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8927 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8928 
8929 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8930 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8931 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8932 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8933 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8934 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8935 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8936 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8937 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8938 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8939 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8940 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8941 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8942 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8943 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8944 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8945 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8946 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8947 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8948 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8949 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8950 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8951 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8952 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8953 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8954 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8955 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8956 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8957 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8958 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8959 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8960 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8961 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8962 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8963 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8964 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8965 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8966 
8967 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8968 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8969 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8970 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8971 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8972 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8973 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8974 
8975 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8976 
8977 struct riscv_hwprobe {
8978     abi_llong  key;
8979     abi_ullong value;
8980 };
8981 
8982 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8983                                     struct riscv_hwprobe *pair,
8984                                     size_t pair_count)
8985 {
8986     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8987 
8988     for (; pair_count > 0; pair_count--, pair++) {
8989         abi_llong key;
8990         abi_ullong value;
8991         __put_user(0, &pair->value);
8992         __get_user(key, &pair->key);
8993         switch (key) {
8994         case RISCV_HWPROBE_KEY_MVENDORID:
8995             __put_user(cfg->mvendorid, &pair->value);
8996             break;
8997         case RISCV_HWPROBE_KEY_MARCHID:
8998             __put_user(cfg->marchid, &pair->value);
8999             break;
9000         case RISCV_HWPROBE_KEY_MIMPID:
9001             __put_user(cfg->mimpid, &pair->value);
9002             break;
9003         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9004             value = riscv_has_ext(env, RVI) &&
9005                     riscv_has_ext(env, RVM) &&
9006                     riscv_has_ext(env, RVA) ?
9007                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9008             __put_user(value, &pair->value);
9009             break;
9010         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9011             value = riscv_has_ext(env, RVF) &&
9012                     riscv_has_ext(env, RVD) ?
9013                     RISCV_HWPROBE_IMA_FD : 0;
9014             value |= riscv_has_ext(env, RVC) ?
9015                      RISCV_HWPROBE_IMA_C : 0;
9016             value |= riscv_has_ext(env, RVV) ?
9017                      RISCV_HWPROBE_IMA_V : 0;
9018             value |= cfg->ext_zba ?
9019                      RISCV_HWPROBE_EXT_ZBA : 0;
9020             value |= cfg->ext_zbb ?
9021                      RISCV_HWPROBE_EXT_ZBB : 0;
9022             value |= cfg->ext_zbs ?
9023                      RISCV_HWPROBE_EXT_ZBS : 0;
9024             value |= cfg->ext_zicboz ?
9025                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9026             value |= cfg->ext_zbc ?
9027                      RISCV_HWPROBE_EXT_ZBC : 0;
9028             value |= cfg->ext_zbkb ?
9029                      RISCV_HWPROBE_EXT_ZBKB : 0;
9030             value |= cfg->ext_zbkc ?
9031                      RISCV_HWPROBE_EXT_ZBKC : 0;
9032             value |= cfg->ext_zbkx ?
9033                      RISCV_HWPROBE_EXT_ZBKX : 0;
9034             value |= cfg->ext_zknd ?
9035                      RISCV_HWPROBE_EXT_ZKND : 0;
9036             value |= cfg->ext_zkne ?
9037                      RISCV_HWPROBE_EXT_ZKNE : 0;
9038             value |= cfg->ext_zknh ?
9039                      RISCV_HWPROBE_EXT_ZKNH : 0;
9040             value |= cfg->ext_zksed ?
9041                      RISCV_HWPROBE_EXT_ZKSED : 0;
9042             value |= cfg->ext_zksh ?
9043                      RISCV_HWPROBE_EXT_ZKSH : 0;
9044             value |= cfg->ext_zkt ?
9045                      RISCV_HWPROBE_EXT_ZKT : 0;
9046             value |= cfg->ext_zvbb ?
9047                      RISCV_HWPROBE_EXT_ZVBB : 0;
9048             value |= cfg->ext_zvbc ?
9049                      RISCV_HWPROBE_EXT_ZVBC : 0;
9050             value |= cfg->ext_zvkb ?
9051                      RISCV_HWPROBE_EXT_ZVKB : 0;
9052             value |= cfg->ext_zvkg ?
9053                      RISCV_HWPROBE_EXT_ZVKG : 0;
9054             value |= cfg->ext_zvkned ?
9055                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9056             value |= cfg->ext_zvknha ?
9057                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9058             value |= cfg->ext_zvknhb ?
9059                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9060             value |= cfg->ext_zvksed ?
9061                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9062             value |= cfg->ext_zvksh ?
9063                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9064             value |= cfg->ext_zvkt ?
9065                      RISCV_HWPROBE_EXT_ZVKT : 0;
9066             value |= cfg->ext_zfh ?
9067                      RISCV_HWPROBE_EXT_ZFH : 0;
9068             value |= cfg->ext_zfhmin ?
9069                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9070             value |= cfg->ext_zihintntl ?
9071                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9072             value |= cfg->ext_zvfh ?
9073                      RISCV_HWPROBE_EXT_ZVFH : 0;
9074             value |= cfg->ext_zvfhmin ?
9075                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9076             value |= cfg->ext_zfa ?
9077                      RISCV_HWPROBE_EXT_ZFA : 0;
9078             value |= cfg->ext_ztso ?
9079                      RISCV_HWPROBE_EXT_ZTSO : 0;
9080             value |= cfg->ext_zacas ?
9081                      RISCV_HWPROBE_EXT_ZACAS : 0;
9082             value |= cfg->ext_zicond ?
9083                      RISCV_HWPROBE_EXT_ZICOND : 0;
9084             __put_user(value, &pair->value);
9085             break;
9086         case RISCV_HWPROBE_KEY_CPUPERF_0:
9087             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9088             break;
9089         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9090             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9091             __put_user(value, &pair->value);
9092             break;
9093         default:
9094             __put_user(-1, &pair->key);
9095             break;
9096         }
9097     }
9098 }
9099 
9100 /*
9101  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9102  * If the cpumask_t has no bits set: -EINVAL.
9103  * Otherwise the cpumask_t contains some bit set: 0.
9104  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9105  * nor bound the search by cpumask_size().
9106  */
9107 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9108 {
9109     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9110     int ret = -TARGET_EFAULT;
9111 
9112     if (p) {
9113         ret = -TARGET_EINVAL;
9114         /*
9115          * Since we only care about the empty/non-empty state of the cpumask_t,
9116          * not the individual bits, we do not need to repartition the bits
9117          * from target abi_ulong to host unsigned long.
9118          *
9119          * Note that the kernel does not round up cpusetsize to a multiple of
9120          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9121          * it copies exactly cpusetsize bytes into a zeroed buffer.
9122          */
9123         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9124             if (p[i]) {
9125                 ret = 0;
9126                 break;
9127             }
9128         }
9129         unlock_user(p, target_cpus, 0);
9130     }
9131     return ret;
9132 }
9133 
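/*
 * Argument mapping for do_riscv_hwprobe (mirroring the kernel syscall):
 *   arg1 = guest pointer to the array of struct riscv_hwprobe pairs
 *   arg2 = number of pairs
 *   arg3 = cpusetsize, arg4 = guest pointer to the cpu set
 *          (either both zero, or a readable non-empty set)
 *   arg5 = flags, which must be 0
 */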
9134 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9135                                  abi_long arg2, abi_long arg3,
9136                                  abi_long arg4, abi_long arg5)
9137 {
9138     int ret;
9139     struct riscv_hwprobe *host_pairs;
9140 
9141     /* flags must be 0 */
9142     if (arg5 != 0) {
9143         return -TARGET_EINVAL;
9144     }
9145 
9146     /* check cpu_set */
9147     if (arg3 != 0) {
9148         ret = nonempty_cpu_set(arg3, arg4);
9149         if (ret != 0) {
9150             return ret;
9151         }
9152     } else if (arg4 != 0) {
9153         return -TARGET_EINVAL;
9154     }
9155 
9156     /* no pairs */
9157     if (arg2 == 0) {
9158         return 0;
9159     }
9160 
9161     host_pairs = lock_user(VERIFY_WRITE, arg1,
9162                            sizeof(*host_pairs) * (size_t)arg2, 0);
9163     if (host_pairs == NULL) {
9164         return -TARGET_EFAULT;
9165     }
9166     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9167     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9168     return 0;
9169 }
9170 #endif /* TARGET_NR_riscv_hwprobe */
9171 
9172 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9173 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9174 #endif
9175 
9176 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9177 #define __NR_sys_open_tree __NR_open_tree
9178 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9179           unsigned int, __flags)
9180 #endif
9181 
9182 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9183 #define __NR_sys_move_mount __NR_move_mount
9184 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9185            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9186 #endif
9187 
9188 /* This is an internal helper for do_syscall, giving it a single
9189  * return point so that actions such as logging of syscall results
9190  * can be performed.
9191  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9192  */
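/*
 * The cases below implement that convention by wrapping host calls in
 * get_errno(), which turns a failing call into the corresponding negative
 * target errno, and by testing results with is_error() before doing any
 * further conversion for the guest.
 */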
9193 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9194                             abi_long arg2, abi_long arg3, abi_long arg4,
9195                             abi_long arg5, abi_long arg6, abi_long arg7,
9196                             abi_long arg8)
9197 {
9198     CPUState *cpu = env_cpu(cpu_env);
9199     abi_long ret;
9200 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9201     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9202     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9203     || defined(TARGET_NR_statx)
9204     struct stat st;
9205 #endif
9206 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9207     || defined(TARGET_NR_fstatfs)
9208     struct statfs stfs;
9209 #endif
9210     void *p;
9211 
9212     switch(num) {
9213     case TARGET_NR_exit:
9214         /* In old applications this may be used to implement _exit(2).
9215            However, in threaded applications it is used for thread termination,
9216            and _exit_group is used for application termination.
9217            Do thread termination if we have more than one thread.  */
9218 
9219         if (block_signals()) {
9220             return -QEMU_ERESTARTSYS;
9221         }
9222 
9223         pthread_mutex_lock(&clone_lock);
9224 
9225         if (CPU_NEXT(first_cpu)) {
9226             TaskState *ts = get_task_state(cpu);
9227 
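            /*
             * If the guest thread was created with CLONE_CHILD_CLEARTID,
             * child_tidptr records the address to clear on exit; zeroing
             * it and doing a FUTEX_WAKE mirrors the kernel's exit-time
             * behaviour that pthread_join() relies on.
             */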
9228             if (ts->child_tidptr) {
9229                 put_user_u32(0, ts->child_tidptr);
9230                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9231                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9232             }
9233 
9234             object_unparent(OBJECT(cpu));
9235             object_unref(OBJECT(cpu));
9236             /*
9237              * At this point the CPU should be unrealized and removed
9238              * from cpu lists. We can clean up the rest of the thread
9239              * data without the lock held.
9240              */
9241 
9242             pthread_mutex_unlock(&clone_lock);
9243 
9244             thread_cpu = NULL;
9245             g_free(ts);
9246             rcu_unregister_thread();
9247             pthread_exit(NULL);
9248         }
9249 
9250         pthread_mutex_unlock(&clone_lock);
9251         preexit_cleanup(cpu_env, arg1);
9252         _exit(arg1);
9253         return 0; /* avoid warning */
9254     case TARGET_NR_read:
9255         if (arg2 == 0 && arg3 == 0) {
9256             return get_errno(safe_read(arg1, 0, 0));
9257         } else {
9258             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9259                 return -TARGET_EFAULT;
9260             ret = get_errno(safe_read(arg1, p, arg3));
9261             if (ret >= 0 &&
9262                 fd_trans_host_to_target_data(arg1)) {
9263                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9264             }
9265             unlock_user(p, arg2, ret);
9266         }
9267         return ret;
9268     case TARGET_NR_write:
9269         if (arg2 == 0 && arg3 == 0) {
9270             return get_errno(safe_write(arg1, 0, 0));
9271         }
9272         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9273             return -TARGET_EFAULT;
9274         if (fd_trans_target_to_host_data(arg1)) {
9275             void *copy = g_malloc(arg3);
9276             memcpy(copy, p, arg3);
9277             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9278             if (ret >= 0) {
9279                 ret = get_errno(safe_write(arg1, copy, ret));
9280             }
9281             g_free(copy);
9282         } else {
9283             ret = get_errno(safe_write(arg1, p, arg3));
9284         }
9285         unlock_user(p, arg2, 0);
9286         return ret;
9287 
9288 #ifdef TARGET_NR_open
9289     case TARGET_NR_open:
9290         if (!(p = lock_user_string(arg1)))
9291             return -TARGET_EFAULT;
9292         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9293                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9294                                   arg3, true));
9295         fd_trans_unregister(ret);
9296         unlock_user(p, arg1, 0);
9297         return ret;
9298 #endif
9299     case TARGET_NR_openat:
9300         if (!(p = lock_user_string(arg2)))
9301             return -TARGET_EFAULT;
9302         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9303                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9304                                   arg4, true));
9305         fd_trans_unregister(ret);
9306         unlock_user(p, arg2, 0);
9307         return ret;
9308     case TARGET_NR_openat2:
9309         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9310         return ret;
9311 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9312     case TARGET_NR_name_to_handle_at:
9313         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9314         return ret;
9315 #endif
9316 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9317     case TARGET_NR_open_by_handle_at:
9318         ret = do_open_by_handle_at(arg1, arg2, arg3);
9319         fd_trans_unregister(ret);
9320         return ret;
9321 #endif
9322 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9323     case TARGET_NR_pidfd_open:
9324         return get_errno(pidfd_open(arg1, arg2));
9325 #endif
9326 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9327     case TARGET_NR_pidfd_send_signal:
9328         {
9329             siginfo_t uinfo, *puinfo;
9330 
9331             if (arg3) {
9332                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9333                 if (!p) {
9334                     return -TARGET_EFAULT;
9335                  }
9336                  target_to_host_siginfo(&uinfo, p);
9337                  unlock_user(p, arg3, 0);
9338                  puinfo = &uinfo;
9339             } else {
9340                  puinfo = NULL;
9341             }
9342             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9343                                               puinfo, arg4));
9344         }
9345         return ret;
9346 #endif
9347 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9348     case TARGET_NR_pidfd_getfd:
9349         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9350 #endif
9351     case TARGET_NR_close:
9352         fd_trans_unregister(arg1);
9353         return get_errno(close(arg1));
9354 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9355     case TARGET_NR_close_range:
9356         ret = get_errno(sys_close_range(arg1, arg2, arg3));
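        /*
         * CLOSE_RANGE_CLOEXEC only marks the descriptors close-on-exec
         * rather than closing them, so in that case the fd translation
         * entries must stay registered.
         */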
9357         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9358             abi_long fd, maxfd;
9359             maxfd = MIN(arg2, target_fd_max);
9360             for (fd = arg1; fd < maxfd; fd++) {
9361                 fd_trans_unregister(fd);
9362             }
9363         }
9364         return ret;
9365 #endif
9366 
9367     case TARGET_NR_brk:
9368         return do_brk(arg1);
9369 #ifdef TARGET_NR_fork
9370     case TARGET_NR_fork:
9371         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9372 #endif
9373 #ifdef TARGET_NR_waitpid
9374     case TARGET_NR_waitpid:
9375         {
9376             int status;
9377             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9378             if (!is_error(ret) && arg2 && ret
9379                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9380                 return -TARGET_EFAULT;
9381         }
9382         return ret;
9383 #endif
9384 #ifdef TARGET_NR_waitid
9385     case TARGET_NR_waitid:
9386         {
9387             struct rusage ru;
9388             siginfo_t info;
9389 
9390             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9391                                         arg4, (arg5 ? &ru : NULL)));
9392             if (!is_error(ret)) {
9393                 if (arg3) {
9394                     p = lock_user(VERIFY_WRITE, arg3,
9395                                   sizeof(target_siginfo_t), 0);
9396                     if (!p) {
9397                         return -TARGET_EFAULT;
9398                     }
9399                     host_to_target_siginfo(p, &info);
9400                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9401                 }
9402                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9403                     return -TARGET_EFAULT;
9404                 }
9405             }
9406         }
9407         return ret;
9408 #endif
9409 #ifdef TARGET_NR_creat /* not on alpha */
9410     case TARGET_NR_creat:
9411         if (!(p = lock_user_string(arg1)))
9412             return -TARGET_EFAULT;
9413         ret = get_errno(creat(p, arg2));
9414         fd_trans_unregister(ret);
9415         unlock_user(p, arg1, 0);
9416         return ret;
9417 #endif
9418 #ifdef TARGET_NR_link
9419     case TARGET_NR_link:
9420         {
9421             void * p2;
9422             p = lock_user_string(arg1);
9423             p2 = lock_user_string(arg2);
9424             if (!p || !p2)
9425                 ret = -TARGET_EFAULT;
9426             else
9427                 ret = get_errno(link(p, p2));
9428             unlock_user(p2, arg2, 0);
9429             unlock_user(p, arg1, 0);
9430         }
9431         return ret;
9432 #endif
9433 #if defined(TARGET_NR_linkat)
9434     case TARGET_NR_linkat:
9435         {
9436             void * p2 = NULL;
9437             if (!arg2 || !arg4)
9438                 return -TARGET_EFAULT;
9439             p  = lock_user_string(arg2);
9440             p2 = lock_user_string(arg4);
9441             if (!p || !p2)
9442                 ret = -TARGET_EFAULT;
9443             else
9444                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9445             unlock_user(p, arg2, 0);
9446             unlock_user(p2, arg4, 0);
9447         }
9448         return ret;
9449 #endif
9450 #ifdef TARGET_NR_unlink
9451     case TARGET_NR_unlink:
9452         if (!(p = lock_user_string(arg1)))
9453             return -TARGET_EFAULT;
9454         ret = get_errno(unlink(p));
9455         unlock_user(p, arg1, 0);
9456         return ret;
9457 #endif
9458 #if defined(TARGET_NR_unlinkat)
9459     case TARGET_NR_unlinkat:
9460         if (!(p = lock_user_string(arg2)))
9461             return -TARGET_EFAULT;
9462         ret = get_errno(unlinkat(arg1, p, arg3));
9463         unlock_user(p, arg2, 0);
9464         return ret;
9465 #endif
9466     case TARGET_NR_execveat:
9467         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9468     case TARGET_NR_execve:
9469         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9470     case TARGET_NR_chdir:
9471         if (!(p = lock_user_string(arg1)))
9472             return -TARGET_EFAULT;
9473         ret = get_errno(chdir(p));
9474         unlock_user(p, arg1, 0);
9475         return ret;
9476 #ifdef TARGET_NR_time
9477     case TARGET_NR_time:
9478         {
9479             time_t host_time;
9480             ret = get_errno(time(&host_time));
9481             if (!is_error(ret)
9482                 && arg1
9483                 && put_user_sal(host_time, arg1))
9484                 return -TARGET_EFAULT;
9485         }
9486         return ret;
9487 #endif
9488 #ifdef TARGET_NR_mknod
9489     case TARGET_NR_mknod:
9490         if (!(p = lock_user_string(arg1)))
9491             return -TARGET_EFAULT;
9492         ret = get_errno(mknod(p, arg2, arg3));
9493         unlock_user(p, arg1, 0);
9494         return ret;
9495 #endif
9496 #if defined(TARGET_NR_mknodat)
9497     case TARGET_NR_mknodat:
9498         if (!(p = lock_user_string(arg2)))
9499             return -TARGET_EFAULT;
9500         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9501         unlock_user(p, arg2, 0);
9502         return ret;
9503 #endif
9504 #ifdef TARGET_NR_chmod
9505     case TARGET_NR_chmod:
9506         if (!(p = lock_user_string(arg1)))
9507             return -TARGET_EFAULT;
9508         ret = get_errno(chmod(p, arg2));
9509         unlock_user(p, arg1, 0);
9510         return ret;
9511 #endif
9512 #ifdef TARGET_NR_lseek
9513     case TARGET_NR_lseek:
9514         return get_errno(lseek(arg1, arg2, arg3));
9515 #endif
9516 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9517     /* Alpha specific */
9518     case TARGET_NR_getxpid:
9519         cpu_env->ir[IR_A4] = getppid();
9520         return get_errno(getpid());
9521 #endif
9522 #ifdef TARGET_NR_getpid
9523     case TARGET_NR_getpid:
9524         return get_errno(getpid());
9525 #endif
9526     case TARGET_NR_mount:
9527         {
9528             /* need to look at the data field */
9529             void *p2, *p3;
9530 
9531             if (arg1) {
9532                 p = lock_user_string(arg1);
9533                 if (!p) {
9534                     return -TARGET_EFAULT;
9535                 }
9536             } else {
9537                 p = NULL;
9538             }
9539 
9540             p2 = lock_user_string(arg2);
9541             if (!p2) {
9542                 if (arg1) {
9543                     unlock_user(p, arg1, 0);
9544                 }
9545                 return -TARGET_EFAULT;
9546             }
9547 
9548             if (arg3) {
9549                 p3 = lock_user_string(arg3);
9550                 if (!p3) {
9551                     if (arg1) {
9552                         unlock_user(p, arg1, 0);
9553                     }
9554                     unlock_user(p2, arg2, 0);
9555                     return -TARGET_EFAULT;
9556                 }
9557             } else {
9558                 p3 = NULL;
9559             }
9560 
9561             /* FIXME - arg5 should be locked, but it isn't clear how to
9562              * do that since it's not guaranteed to be a NULL-terminated
9563              * string.
9564              */
9565             if (!arg5) {
9566                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9567             } else {
9568                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9569             }
9570             ret = get_errno(ret);
9571 
9572             if (arg1) {
9573                 unlock_user(p, arg1, 0);
9574             }
9575             unlock_user(p2, arg2, 0);
9576             if (arg3) {
9577                 unlock_user(p3, arg3, 0);
9578             }
9579         }
9580         return ret;
9581 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9582 #if defined(TARGET_NR_umount)
9583     case TARGET_NR_umount:
9584 #endif
9585 #if defined(TARGET_NR_oldumount)
9586     case TARGET_NR_oldumount:
9587 #endif
9588         if (!(p = lock_user_string(arg1)))
9589             return -TARGET_EFAULT;
9590         ret = get_errno(umount(p));
9591         unlock_user(p, arg1, 0);
9592         return ret;
9593 #endif
9594 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9595     case TARGET_NR_move_mount:
9596         {
9597             void *p2, *p4;
9598 
9599             if (!arg2 || !arg4) {
9600                 return -TARGET_EFAULT;
9601             }
9602 
9603             p2 = lock_user_string(arg2);
9604             if (!p2) {
9605                 return -TARGET_EFAULT;
9606             }
9607 
9608             p4 = lock_user_string(arg4);
9609             if (!p4) {
9610                 unlock_user(p2, arg2, 0);
9611                 return -TARGET_EFAULT;
9612             }
9613             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9614 
9615             unlock_user(p2, arg2, 0);
9616             unlock_user(p4, arg4, 0);
9617 
9618             return ret;
9619         }
9620 #endif
9621 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9622     case TARGET_NR_open_tree:
9623         {
9624             void *p2;
9625             int host_flags;
9626 
9627             if (!arg2) {
9628                 return -TARGET_EFAULT;
9629             }
9630 
9631             p2 = lock_user_string(arg2);
9632             if (!p2) {
9633                 return -TARGET_EFAULT;
9634             }
9635 
9636             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9637             if (arg3 & TARGET_O_CLOEXEC) {
9638                 host_flags |= O_CLOEXEC;
9639             }
9640 
9641             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9642 
9643             unlock_user(p2, arg2, 0);
9644 
9645             return ret;
9646         }
9647 #endif
9648 #ifdef TARGET_NR_stime /* not on alpha */
9649     case TARGET_NR_stime:
9650         {
9651             struct timespec ts;
9652             ts.tv_nsec = 0;
9653             if (get_user_sal(ts.tv_sec, arg1)) {
9654                 return -TARGET_EFAULT;
9655             }
9656             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9657         }
9658 #endif
9659 #ifdef TARGET_NR_alarm /* not on alpha */
9660     case TARGET_NR_alarm:
9661         return alarm(arg1);
9662 #endif
9663 #ifdef TARGET_NR_pause /* not on alpha */
9664     case TARGET_NR_pause:
9665         if (!block_signals()) {
9666             sigsuspend(&get_task_state(cpu)->signal_mask);
9667         }
9668         return -TARGET_EINTR;
9669 #endif
9670 #ifdef TARGET_NR_utime
9671     case TARGET_NR_utime:
9672         {
9673             struct utimbuf tbuf, *host_tbuf;
9674             struct target_utimbuf *target_tbuf;
9675             if (arg2) {
9676                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9677                     return -TARGET_EFAULT;
9678                 tbuf.actime = tswapal(target_tbuf->actime);
9679                 tbuf.modtime = tswapal(target_tbuf->modtime);
9680                 unlock_user_struct(target_tbuf, arg2, 0);
9681                 host_tbuf = &tbuf;
9682             } else {
9683                 host_tbuf = NULL;
9684             }
9685             if (!(p = lock_user_string(arg1)))
9686                 return -TARGET_EFAULT;
9687             ret = get_errno(utime(p, host_tbuf));
9688             unlock_user(p, arg1, 0);
9689         }
9690         return ret;
9691 #endif
9692 #ifdef TARGET_NR_utimes
9693     case TARGET_NR_utimes:
9694         {
9695             struct timeval *tvp, tv[2];
9696             if (arg2) {
9697                 if (copy_from_user_timeval(&tv[0], arg2)
9698                     || copy_from_user_timeval(&tv[1],
9699                                               arg2 + sizeof(struct target_timeval)))
9700                     return -TARGET_EFAULT;
9701                 tvp = tv;
9702             } else {
9703                 tvp = NULL;
9704             }
9705             if (!(p = lock_user_string(arg1)))
9706                 return -TARGET_EFAULT;
9707             ret = get_errno(utimes(p, tvp));
9708             unlock_user(p, arg1, 0);
9709         }
9710         return ret;
9711 #endif
9712 #if defined(TARGET_NR_futimesat)
9713     case TARGET_NR_futimesat:
9714         {
9715             struct timeval *tvp, tv[2];
9716             if (arg3) {
9717                 if (copy_from_user_timeval(&tv[0], arg3)
9718                     || copy_from_user_timeval(&tv[1],
9719                                               arg3 + sizeof(struct target_timeval)))
9720                     return -TARGET_EFAULT;
9721                 tvp = tv;
9722             } else {
9723                 tvp = NULL;
9724             }
9725             if (!(p = lock_user_string(arg2))) {
9726                 return -TARGET_EFAULT;
9727             }
9728             ret = get_errno(futimesat(arg1, path(p), tvp));
9729             unlock_user(p, arg2, 0);
9730         }
9731         return ret;
9732 #endif
9733 #ifdef TARGET_NR_access
9734     case TARGET_NR_access:
9735         if (!(p = lock_user_string(arg1))) {
9736             return -TARGET_EFAULT;
9737         }
9738         ret = get_errno(access(path(p), arg2));
9739         unlock_user(p, arg1, 0);
9740         return ret;
9741 #endif
9742 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9743     case TARGET_NR_faccessat:
9744         if (!(p = lock_user_string(arg2))) {
9745             return -TARGET_EFAULT;
9746         }
9747         ret = get_errno(faccessat(arg1, p, arg3, 0));
9748         unlock_user(p, arg2, 0);
9749         return ret;
9750 #endif
9751 #if defined(TARGET_NR_faccessat2)
9752     case TARGET_NR_faccessat2:
9753         if (!(p = lock_user_string(arg2))) {
9754             return -TARGET_EFAULT;
9755         }
9756         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9757         unlock_user(p, arg2, 0);
9758         return ret;
9759 #endif
9760 #ifdef TARGET_NR_nice /* not on alpha */
9761     case TARGET_NR_nice:
9762         return get_errno(nice(arg1));
9763 #endif
9764     case TARGET_NR_sync:
9765         sync();
9766         return 0;
9767 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9768     case TARGET_NR_syncfs:
9769         return get_errno(syncfs(arg1));
9770 #endif
9771     case TARGET_NR_kill:
9772         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9773 #ifdef TARGET_NR_rename
9774     case TARGET_NR_rename:
9775         {
9776             void *p2;
9777             p = lock_user_string(arg1);
9778             p2 = lock_user_string(arg2);
9779             if (!p || !p2)
9780                 ret = -TARGET_EFAULT;
9781             else
9782                 ret = get_errno(rename(p, p2));
9783             unlock_user(p2, arg2, 0);
9784             unlock_user(p, arg1, 0);
9785         }
9786         return ret;
9787 #endif
9788 #if defined(TARGET_NR_renameat)
9789     case TARGET_NR_renameat:
9790         {
9791             void *p2;
9792             p  = lock_user_string(arg2);
9793             p2 = lock_user_string(arg4);
9794             if (!p || !p2)
9795                 ret = -TARGET_EFAULT;
9796             else
9797                 ret = get_errno(renameat(arg1, p, arg3, p2));
9798             unlock_user(p2, arg4, 0);
9799             unlock_user(p, arg2, 0);
9800         }
9801         return ret;
9802 #endif
9803 #if defined(TARGET_NR_renameat2)
9804     case TARGET_NR_renameat2:
9805         {
9806             void *p2;
9807             p  = lock_user_string(arg2);
9808             p2 = lock_user_string(arg4);
9809             if (!p || !p2) {
9810                 ret = -TARGET_EFAULT;
9811             } else {
9812                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9813             }
9814             unlock_user(p2, arg4, 0);
9815             unlock_user(p, arg2, 0);
9816         }
9817         return ret;
9818 #endif
9819 #ifdef TARGET_NR_mkdir
9820     case TARGET_NR_mkdir:
9821         if (!(p = lock_user_string(arg1)))
9822             return -TARGET_EFAULT;
9823         ret = get_errno(mkdir(p, arg2));
9824         unlock_user(p, arg1, 0);
9825         return ret;
9826 #endif
9827 #if defined(TARGET_NR_mkdirat)
9828     case TARGET_NR_mkdirat:
9829         if (!(p = lock_user_string(arg2)))
9830             return -TARGET_EFAULT;
9831         ret = get_errno(mkdirat(arg1, p, arg3));
9832         unlock_user(p, arg2, 0);
9833         return ret;
9834 #endif
9835 #ifdef TARGET_NR_rmdir
9836     case TARGET_NR_rmdir:
9837         if (!(p = lock_user_string(arg1)))
9838             return -TARGET_EFAULT;
9839         ret = get_errno(rmdir(p));
9840         unlock_user(p, arg1, 0);
9841         return ret;
9842 #endif
9843     case TARGET_NR_dup:
9844         ret = get_errno(dup(arg1));
9845         if (ret >= 0) {
9846             fd_trans_dup(arg1, ret);
9847         }
9848         return ret;
9849 #ifdef TARGET_NR_pipe
9850     case TARGET_NR_pipe:
9851         return do_pipe(cpu_env, arg1, 0, 0);
9852 #endif
9853 #ifdef TARGET_NR_pipe2
9854     case TARGET_NR_pipe2:
9855         return do_pipe(cpu_env, arg1,
9856                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9857 #endif
9858     case TARGET_NR_times:
9859         {
9860             struct target_tms *tmsp;
9861             struct tms tms;
9862             ret = get_errno(times(&tms));
9863             if (arg1) {
9864                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9865                 if (!tmsp)
9866                     return -TARGET_EFAULT;
9867                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9868                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9869                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9870                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9871             }
9872             if (!is_error(ret))
9873                 ret = host_to_target_clock_t(ret);
9874         }
9875         return ret;
9876     case TARGET_NR_acct:
9877         if (arg1 == 0) {
9878             ret = get_errno(acct(NULL));
9879         } else {
9880             if (!(p = lock_user_string(arg1))) {
9881                 return -TARGET_EFAULT;
9882             }
9883             ret = get_errno(acct(path(p)));
9884             unlock_user(p, arg1, 0);
9885         }
9886         return ret;
9887 #ifdef TARGET_NR_umount2
9888     case TARGET_NR_umount2:
9889         if (!(p = lock_user_string(arg1)))
9890             return -TARGET_EFAULT;
9891         ret = get_errno(umount2(p, arg2));
9892         unlock_user(p, arg1, 0);
9893         return ret;
9894 #endif
9895     case TARGET_NR_ioctl:
9896         return do_ioctl(arg1, arg2, arg3);
9897 #ifdef TARGET_NR_fcntl
9898     case TARGET_NR_fcntl:
9899         return do_fcntl(arg1, arg2, arg3);
9900 #endif
9901     case TARGET_NR_setpgid:
9902         return get_errno(setpgid(arg1, arg2));
9903     case TARGET_NR_umask:
9904         return get_errno(umask(arg1));
9905     case TARGET_NR_chroot:
9906         if (!(p = lock_user_string(arg1)))
9907             return -TARGET_EFAULT;
9908         ret = get_errno(chroot(p));
9909         unlock_user(p, arg1, 0);
9910         return ret;
9911 #ifdef TARGET_NR_dup2
9912     case TARGET_NR_dup2:
9913         ret = get_errno(dup2(arg1, arg2));
9914         if (ret >= 0) {
9915             fd_trans_dup(arg1, arg2);
9916         }
9917         return ret;
9918 #endif
9919 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9920     case TARGET_NR_dup3:
9921     {
9922         int host_flags;
9923 
9924         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9925             return -EINVAL;
9926         }
9927         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9928         ret = get_errno(dup3(arg1, arg2, host_flags));
9929         if (ret >= 0) {
9930             fd_trans_dup(arg1, arg2);
9931         }
9932         return ret;
9933     }
9934 #endif
9935 #ifdef TARGET_NR_getppid /* not on alpha */
9936     case TARGET_NR_getppid:
9937         return get_errno(getppid());
9938 #endif
9939 #ifdef TARGET_NR_getpgrp
9940     case TARGET_NR_getpgrp:
9941         return get_errno(getpgrp());
9942 #endif
9943     case TARGET_NR_setsid:
9944         return get_errno(setsid());
9945 #ifdef TARGET_NR_sigaction
9946     case TARGET_NR_sigaction:
9947         {
9948 #if defined(TARGET_MIPS)
9949             struct target_sigaction act, oact, *pact, *old_act;
9950 
9951             if (arg2) {
9952                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9953                     return -TARGET_EFAULT;
9954                 act._sa_handler = old_act->_sa_handler;
9955                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9956                 act.sa_flags = old_act->sa_flags;
9957                 unlock_user_struct(old_act, arg2, 0);
9958                 pact = &act;
9959             } else {
9960                 pact = NULL;
9961             }
9962 
9963             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9964 
9965             if (!is_error(ret) && arg3) {
9966                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9967                     return -TARGET_EFAULT;
9968                 old_act->_sa_handler = oact._sa_handler;
9969                 old_act->sa_flags = oact.sa_flags;
9970                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9971                 old_act->sa_mask.sig[1] = 0;
9972                 old_act->sa_mask.sig[2] = 0;
9973                 old_act->sa_mask.sig[3] = 0;
9974                 unlock_user_struct(old_act, arg3, 1);
9975             }
9976 #else
9977             struct target_old_sigaction *old_act;
9978             struct target_sigaction act, oact, *pact;
9979             if (arg2) {
9980                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9981                     return -TARGET_EFAULT;
9982                 act._sa_handler = old_act->_sa_handler;
9983                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9984                 act.sa_flags = old_act->sa_flags;
9985 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9986                 act.sa_restorer = old_act->sa_restorer;
9987 #endif
9988                 unlock_user_struct(old_act, arg2, 0);
9989                 pact = &act;
9990             } else {
9991                 pact = NULL;
9992             }
9993             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9994             if (!is_error(ret) && arg3) {
9995                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9996                     return -TARGET_EFAULT;
9997                 old_act->_sa_handler = oact._sa_handler;
9998                 old_act->sa_mask = oact.sa_mask.sig[0];
9999                 old_act->sa_flags = oact.sa_flags;
10000 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10001                 old_act->sa_restorer = oact.sa_restorer;
10002 #endif
10003                 unlock_user_struct(old_act, arg3, 1);
10004             }
10005 #endif
10006         }
10007         return ret;
10008 #endif
10009     case TARGET_NR_rt_sigaction:
10010         {
10011             /*
10012              * For Alpha and SPARC this is a 5 argument syscall, with
10013              * a 'restorer' parameter which must be copied into the
10014              * sa_restorer field of the sigaction struct.
10015              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10016              * and arg5 is the sigsetsize.
10017              */
10018 #if defined(TARGET_ALPHA)
10019             target_ulong sigsetsize = arg4;
10020             target_ulong restorer = arg5;
10021 #elif defined(TARGET_SPARC)
10022             target_ulong restorer = arg4;
10023             target_ulong sigsetsize = arg5;
10024 #else
10025             target_ulong sigsetsize = arg4;
10026             target_ulong restorer = 0;
10027 #endif
10028             struct target_sigaction *act = NULL;
10029             struct target_sigaction *oact = NULL;
10030 
10031             if (sigsetsize != sizeof(target_sigset_t)) {
10032                 return -TARGET_EINVAL;
10033             }
10034             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10035                 return -TARGET_EFAULT;
10036             }
10037             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10038                 ret = -TARGET_EFAULT;
10039             } else {
10040                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10041                 if (oact) {
10042                     unlock_user_struct(oact, arg3, 1);
10043                 }
10044             }
10045             if (act) {
10046                 unlock_user_struct(act, arg2, 0);
10047             }
10048         }
10049         return ret;
10050 #ifdef TARGET_NR_sgetmask /* not on alpha */
10051     case TARGET_NR_sgetmask:
10052         {
10053             sigset_t cur_set;
10054             abi_ulong target_set;
10055             ret = do_sigprocmask(0, NULL, &cur_set);
10056             if (!ret) {
10057                 host_to_target_old_sigset(&target_set, &cur_set);
10058                 ret = target_set;
10059             }
10060         }
10061         return ret;
10062 #endif
10063 #ifdef TARGET_NR_ssetmask /* not on alpha */
10064     case TARGET_NR_ssetmask:
10065         {
10066             sigset_t set, oset;
10067             abi_ulong target_set = arg1;
10068             target_to_host_old_sigset(&set, &target_set);
10069             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10070             if (!ret) {
10071                 host_to_target_old_sigset(&target_set, &oset);
10072                 ret = target_set;
10073             }
10074         }
10075         return ret;
10076 #endif
10077 #ifdef TARGET_NR_sigprocmask
10078     case TARGET_NR_sigprocmask:
10079         {
10080 #if defined(TARGET_ALPHA)
10081             sigset_t set, oldset;
10082             abi_ulong mask;
10083             int how;
10084 
10085             switch (arg1) {
10086             case TARGET_SIG_BLOCK:
10087                 how = SIG_BLOCK;
10088                 break;
10089             case TARGET_SIG_UNBLOCK:
10090                 how = SIG_UNBLOCK;
10091                 break;
10092             case TARGET_SIG_SETMASK:
10093                 how = SIG_SETMASK;
10094                 break;
10095             default:
10096                 return -TARGET_EINVAL;
10097             }
10098             mask = arg2;
10099             target_to_host_old_sigset(&set, &mask);
10100 
10101             ret = do_sigprocmask(how, &set, &oldset);
10102             if (!is_error(ret)) {
10103                 host_to_target_old_sigset(&mask, &oldset);
10104                 ret = mask;
10105                 cpu_env->ir[IR_V0] = 0; /* force no error */
10106             }
10107 #else
10108             sigset_t set, oldset, *set_ptr;
10109             int how;
10110 
10111             if (arg2) {
10112                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10113                 if (!p) {
10114                     return -TARGET_EFAULT;
10115                 }
10116                 target_to_host_old_sigset(&set, p);
10117                 unlock_user(p, arg2, 0);
10118                 set_ptr = &set;
10119                 switch (arg1) {
10120                 case TARGET_SIG_BLOCK:
10121                     how = SIG_BLOCK;
10122                     break;
10123                 case TARGET_SIG_UNBLOCK:
10124                     how = SIG_UNBLOCK;
10125                     break;
10126                 case TARGET_SIG_SETMASK:
10127                     how = SIG_SETMASK;
10128                     break;
10129                 default:
10130                     return -TARGET_EINVAL;
10131                 }
10132             } else {
10133                 how = 0;
10134                 set_ptr = NULL;
10135             }
10136             ret = do_sigprocmask(how, set_ptr, &oldset);
10137             if (!is_error(ret) && arg3) {
10138                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10139                     return -TARGET_EFAULT;
10140                 host_to_target_old_sigset(p, &oldset);
10141                 unlock_user(p, arg3, sizeof(target_sigset_t));
10142             }
10143 #endif
10144         }
10145         return ret;
10146 #endif
10147     case TARGET_NR_rt_sigprocmask:
10148         {
10149             int how = arg1;
10150             sigset_t set, oldset, *set_ptr;
10151 
10152             if (arg4 != sizeof(target_sigset_t)) {
10153                 return -TARGET_EINVAL;
10154             }
10155 
10156             if (arg2) {
10157                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10158                 if (!p) {
10159                     return -TARGET_EFAULT;
10160                 }
10161                 target_to_host_sigset(&set, p);
10162                 unlock_user(p, arg2, 0);
10163                 set_ptr = &set;
10164                 switch(how) {
10165                 case TARGET_SIG_BLOCK:
10166                     how = SIG_BLOCK;
10167                     break;
10168                 case TARGET_SIG_UNBLOCK:
10169                     how = SIG_UNBLOCK;
10170                     break;
10171                 case TARGET_SIG_SETMASK:
10172                     how = SIG_SETMASK;
10173                     break;
10174                 default:
10175                     return -TARGET_EINVAL;
10176                 }
10177             } else {
10178                 how = 0;
10179                 set_ptr = NULL;
10180             }
10181             ret = do_sigprocmask(how, set_ptr, &oldset);
10182             if (!is_error(ret) && arg3) {
10183                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10184                     return -TARGET_EFAULT;
10185                 host_to_target_sigset(p, &oldset);
10186                 unlock_user(p, arg3, sizeof(target_sigset_t));
10187             }
10188         }
10189         return ret;
10190 #ifdef TARGET_NR_sigpending
10191     case TARGET_NR_sigpending:
10192         {
10193             sigset_t set;
10194             ret = get_errno(sigpending(&set));
10195             if (!is_error(ret)) {
10196                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10197                     return -TARGET_EFAULT;
10198                 host_to_target_old_sigset(p, &set);
10199                 unlock_user(p, arg1, sizeof(target_sigset_t));
10200             }
10201         }
10202         return ret;
10203 #endif
10204     case TARGET_NR_rt_sigpending:
10205         {
10206             sigset_t set;
10207 
10208             /* Yes, this check is >, not != like most. We follow the kernel's
10209              * logic and it does it like this because it implements
10210              * NR_sigpending through the same code path, and in that case
10211              * the old_sigset_t is smaller in size.
10212              */
10213             if (arg2 > sizeof(target_sigset_t)) {
10214                 return -TARGET_EINVAL;
10215             }
10216 
10217             ret = get_errno(sigpending(&set));
10218             if (!is_error(ret)) {
10219                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10220                     return -TARGET_EFAULT;
10221                 host_to_target_sigset(p, &set);
10222                 unlock_user(p, arg1, sizeof(target_sigset_t));
10223             }
10224         }
10225         return ret;
10226 #ifdef TARGET_NR_sigsuspend
10227     case TARGET_NR_sigsuspend:
10228         {
10229             sigset_t *set;
10230 
10231 #if defined(TARGET_ALPHA)
10232             TaskState *ts = get_task_state(cpu);
10233             /* target_to_host_old_sigset will bswap back */
10234             abi_ulong mask = tswapal(arg1);
10235             set = &ts->sigsuspend_mask;
10236             target_to_host_old_sigset(set, &mask);
10237 #else
10238             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10239             if (ret != 0) {
10240                 return ret;
10241             }
10242 #endif
10243             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10244             finish_sigsuspend_mask(ret);
10245         }
10246         return ret;
10247 #endif
10248     case TARGET_NR_rt_sigsuspend:
10249         {
10250             sigset_t *set;
10251 
10252             ret = process_sigsuspend_mask(&set, arg1, arg2);
10253             if (ret != 0) {
10254                 return ret;
10255             }
10256             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10257             finish_sigsuspend_mask(ret);
10258         }
10259         return ret;
10260 #ifdef TARGET_NR_rt_sigtimedwait
10261     case TARGET_NR_rt_sigtimedwait:
10262         {
10263             sigset_t set;
10264             struct timespec uts, *puts;
10265             siginfo_t uinfo;
10266 
10267             if (arg4 != sizeof(target_sigset_t)) {
10268                 return -TARGET_EINVAL;
10269             }
10270 
10271             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10272                 return -TARGET_EFAULT;
10273             target_to_host_sigset(&set, p);
10274             unlock_user(p, arg1, 0);
10275             if (arg3) {
10276                 puts = &uts;
10277                 if (target_to_host_timespec(puts, arg3)) {
10278                     return -TARGET_EFAULT;
10279                 }
10280             } else {
10281                 puts = NULL;
10282             }
10283             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10284                                                  SIGSET_T_SIZE));
10285             if (!is_error(ret)) {
10286                 if (arg2) {
10287                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10288                                   0);
10289                     if (!p) {
10290                         return -TARGET_EFAULT;
10291                     }
10292                     host_to_target_siginfo(p, &uinfo);
10293                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10294                 }
10295                 ret = host_to_target_signal(ret);
10296             }
10297         }
10298         return ret;
10299 #endif
10300 #ifdef TARGET_NR_rt_sigtimedwait_time64
10301     case TARGET_NR_rt_sigtimedwait_time64:
10302         {
10303             sigset_t set;
10304             struct timespec uts, *puts;
10305             siginfo_t uinfo;
10306 
10307             if (arg4 != sizeof(target_sigset_t)) {
10308                 return -TARGET_EINVAL;
10309             }
10310 
10311             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10312             if (!p) {
10313                 return -TARGET_EFAULT;
10314             }
10315             target_to_host_sigset(&set, p);
10316             unlock_user(p, arg1, 0);
10317             if (arg3) {
10318                 puts = &uts;
10319                 if (target_to_host_timespec64(puts, arg3)) {
10320                     return -TARGET_EFAULT;
10321                 }
10322             } else {
10323                 puts = NULL;
10324             }
10325             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10326                                                  SIGSET_T_SIZE));
10327             if (!is_error(ret)) {
10328                 if (arg2) {
10329                     p = lock_user(VERIFY_WRITE, arg2,
10330                                   sizeof(target_siginfo_t), 0);
10331                     if (!p) {
10332                         return -TARGET_EFAULT;
10333                     }
10334                     host_to_target_siginfo(p, &uinfo);
10335                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10336                 }
10337                 ret = host_to_target_signal(ret);
10338             }
10339         }
10340         return ret;
10341 #endif
10342     case TARGET_NR_rt_sigqueueinfo:
10343         {
10344             siginfo_t uinfo;
10345 
10346             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10347             if (!p) {
10348                 return -TARGET_EFAULT;
10349             }
10350             target_to_host_siginfo(&uinfo, p);
10351             unlock_user(p, arg3, 0);
10352             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10353         }
10354         return ret;
10355     case TARGET_NR_rt_tgsigqueueinfo:
10356         {
10357             siginfo_t uinfo;
10358 
10359             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10360             if (!p) {
10361                 return -TARGET_EFAULT;
10362             }
10363             target_to_host_siginfo(&uinfo, p);
10364             unlock_user(p, arg4, 0);
10365             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10366         }
10367         return ret;
10368 #ifdef TARGET_NR_sigreturn
10369     case TARGET_NR_sigreturn:
10370         if (block_signals()) {
10371             return -QEMU_ERESTARTSYS;
10372         }
10373         return do_sigreturn(cpu_env);
10374 #endif
10375     case TARGET_NR_rt_sigreturn:
10376         if (block_signals()) {
10377             return -QEMU_ERESTARTSYS;
10378         }
10379         return do_rt_sigreturn(cpu_env);
10380     case TARGET_NR_sethostname:
10381         if (!(p = lock_user_string(arg1)))
10382             return -TARGET_EFAULT;
10383         ret = get_errno(sethostname(p, arg2));
10384         unlock_user(p, arg1, 0);
10385         return ret;
10386 #ifdef TARGET_NR_setrlimit
10387     case TARGET_NR_setrlimit:
10388         {
10389             int resource = target_to_host_resource(arg1);
10390             struct target_rlimit *target_rlim;
10391             struct rlimit rlim;
10392             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10393                 return -TARGET_EFAULT;
10394             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10395             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10396             unlock_user_struct(target_rlim, arg2, 0);
10397             /*
10398              * If we just passed through resource limit settings for memory then
10399              * they would also apply to QEMU's own allocations, and QEMU will
10400              * crash or hang or die if its allocations fail. Ideally we would
10401              * track the guest allocations in QEMU and apply the limits ourselves.
10402              * For now, just tell the guest the call succeeded but don't actually
10403              * limit anything.
10404              */
10405             if (resource != RLIMIT_AS &&
10406                 resource != RLIMIT_DATA &&
10407                 resource != RLIMIT_STACK) {
10408                 return get_errno(setrlimit(resource, &rlim));
10409             } else {
10410                 return 0;
10411             }
10412         }
10413 #endif
10414 #ifdef TARGET_NR_getrlimit
10415     case TARGET_NR_getrlimit:
10416         {
10417             int resource = target_to_host_resource(arg1);
10418             struct target_rlimit *target_rlim;
10419             struct rlimit rlim;
10420 
10421             ret = get_errno(getrlimit(resource, &rlim));
10422             if (!is_error(ret)) {
10423                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10424                     return -TARGET_EFAULT;
10425                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10426                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10427                 unlock_user_struct(target_rlim, arg2, 1);
10428             }
10429         }
10430         return ret;
10431 #endif
10432     case TARGET_NR_getrusage:
10433         {
10434             struct rusage rusage;
10435             ret = get_errno(getrusage(arg1, &rusage));
10436             if (!is_error(ret)) {
10437                 ret = host_to_target_rusage(arg2, &rusage);
10438             }
10439         }
10440         return ret;
10441 #if defined(TARGET_NR_gettimeofday)
10442     case TARGET_NR_gettimeofday:
10443         {
10444             struct timeval tv;
10445             struct timezone tz;
10446 
10447             ret = get_errno(gettimeofday(&tv, &tz));
10448             if (!is_error(ret)) {
10449                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10450                     return -TARGET_EFAULT;
10451                 }
10452                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10453                     return -TARGET_EFAULT;
10454                 }
10455             }
10456         }
10457         return ret;
10458 #endif
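              /*
               * Either settimeofday argument may be NULL, as with the host
               * call: a guest doing e.g. settimeofday(&tv, NULL) has only its
               * timeval converted and the NULL timezone is passed through.
               */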
10459 #if defined(TARGET_NR_settimeofday)
10460     case TARGET_NR_settimeofday:
10461         {
10462             struct timeval tv, *ptv = NULL;
10463             struct timezone tz, *ptz = NULL;
10464 
10465             if (arg1) {
10466                 if (copy_from_user_timeval(&tv, arg1)) {
10467                     return -TARGET_EFAULT;
10468                 }
10469                 ptv = &tv;
10470             }
10471 
10472             if (arg2) {
10473                 if (copy_from_user_timezone(&tz, arg2)) {
10474                     return -TARGET_EFAULT;
10475                 }
10476                 ptz = &tz;
10477             }
10478 
10479             return get_errno(settimeofday(ptv, ptz));
10480         }
10481 #endif
10482 #if defined(TARGET_NR_select)
10483     case TARGET_NR_select:
10484 #if defined(TARGET_WANT_NI_OLD_SELECT)
10485         /* some architectures used to have old_select here
10486          * but now return ENOSYS for it.
10487          */
10488         ret = -TARGET_ENOSYS;
10489 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10490         ret = do_old_select(arg1);
10491 #else
10492         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10493 #endif
10494         return ret;
10495 #endif
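              /*
               * pselect6 and pselect6_time64 share one helper; the final
               * boolean selects whether the guest timespec uses the 64-bit
               * time_t layout.
               */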
10496 #ifdef TARGET_NR_pselect6
10497     case TARGET_NR_pselect6:
10498         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10499 #endif
10500 #ifdef TARGET_NR_pselect6_time64
10501     case TARGET_NR_pselect6_time64:
10502         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10503 #endif
10504 #ifdef TARGET_NR_symlink
10505     case TARGET_NR_symlink:
10506         {
10507             void *p2;
10508             p = lock_user_string(arg1);
10509             p2 = lock_user_string(arg2);
10510             if (!p || !p2)
10511                 ret = -TARGET_EFAULT;
10512             else
10513                 ret = get_errno(symlink(p, p2));
10514             unlock_user(p2, arg2, 0);
10515             unlock_user(p, arg1, 0);
10516         }
10517         return ret;
10518 #endif
10519 #if defined(TARGET_NR_symlinkat)
10520     case TARGET_NR_symlinkat:
10521         {
10522             void *p2;
10523             p  = lock_user_string(arg1);
10524             p2 = lock_user_string(arg3);
10525             if (!p || !p2)
10526                 ret = -TARGET_EFAULT;
10527             else
10528                 ret = get_errno(symlinkat(p, arg2, p2));
10529             unlock_user(p2, arg3, 0);
10530             unlock_user(p, arg1, 0);
10531         }
10532         return ret;
10533 #endif
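              /*
               * readlinkat below special-cases /proc/self/exe via
               * is_proc_myself(): the guest sees the emulated binary's
               * exec_path, truncated to the caller's buffer and, like the
               * real syscall, not NUL-terminated.  readlink itself delegates
               * to the do_guest_readlink() helper.
               */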
10534 #ifdef TARGET_NR_readlink
10535     case TARGET_NR_readlink:
10536         {
10537             void *p2;
10538             p = lock_user_string(arg1);
10539             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10540             ret = get_errno(do_guest_readlink(p, p2, arg3));
10541             unlock_user(p2, arg2, ret);
10542             unlock_user(p, arg1, 0);
10543         }
10544         return ret;
10545 #endif
10546 #if defined(TARGET_NR_readlinkat)
10547     case TARGET_NR_readlinkat:
10548         {
10549             void *p2;
10550             p  = lock_user_string(arg2);
10551             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10552             if (!p || !p2) {
10553                 ret = -TARGET_EFAULT;
10554             } else if (!arg4) {
10555                 /* Short circuit this for the magic exe check. */
10556                 ret = -TARGET_EINVAL;
10557             } else if (is_proc_myself((const char *)p, "exe")) {
10558                 /*
10559                  * Don't worry about sign mismatch as earlier mapping
10560                  * logic would have thrown a bad address error.
10561                  */
10562                 ret = MIN(strlen(exec_path), arg4);
10563                 /* We cannot NUL terminate the string. */
10564                 memcpy(p2, exec_path, ret);
10565             } else {
10566                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10567             }
10568             unlock_user(p2, arg3, ret);
10569             unlock_user(p, arg2, 0);
10570         }
10571         return ret;
10572 #endif
10573 #ifdef TARGET_NR_swapon
10574     case TARGET_NR_swapon:
10575         if (!(p = lock_user_string(arg1)))
10576             return -TARGET_EFAULT;
10577         ret = get_errno(swapon(p, arg2));
10578         unlock_user(p, arg1, 0);
10579         return ret;
10580 #endif
10581     case TARGET_NR_reboot:
10582         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10583             /* arg4 is only used here; it is ignored in all other cases */
10584             p = lock_user_string(arg4);
10585             if (!p) {
10586                 return -TARGET_EFAULT;
10587             }
10588             ret = get_errno(reboot(arg1, arg2, arg3, p));
10589             unlock_user(p, arg4, 0);
10590         } else {
10591             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10592         }
10593         return ret;
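              /*
               * With TARGET_ARCH_WANT_SYS_OLD_MMAP the guest passes a single
               * pointer to a block of six abi_ulongs (as with the old i386
               * mmap), so the real arguments are fetched from guest memory
               * and byte-swapped as needed before calling do_mmap().
               */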
10594 #ifdef TARGET_NR_mmap
10595     case TARGET_NR_mmap:
10596 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10597         {
10598             abi_ulong *v;
10599             abi_ulong v1, v2, v3, v4, v5, v6;
10600             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10601                 return -TARGET_EFAULT;
10602             v1 = tswapal(v[0]);
10603             v2 = tswapal(v[1]);
10604             v3 = tswapal(v[2]);
10605             v4 = tswapal(v[3]);
10606             v5 = tswapal(v[4]);
10607             v6 = tswapal(v[5]);
10608             unlock_user(v, arg1, 0);
10609             return do_mmap(v1, v2, v3, v4, v5, v6);
10610         }
10611 #else
10612         /* mmap pointers are always untagged */
10613         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10614 #endif
10615 #endif
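              /*
               * mmap2 takes its file offset in 2^MMAP_SHIFT-byte units (4096
               * unless the target overrides MMAP_SHIFT), so e.g. an arg6 of 3
               * means a byte offset of 3 << 12 = 12288.
               */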
10616 #ifdef TARGET_NR_mmap2
10617     case TARGET_NR_mmap2:
10618 #ifndef MMAP_SHIFT
10619 #define MMAP_SHIFT 12
10620 #endif
10621         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10622                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10623 #endif
10624     case TARGET_NR_munmap:
10625         arg1 = cpu_untagged_addr(cpu, arg1);
10626         return get_errno(target_munmap(arg1, arg2));
10627     case TARGET_NR_mprotect:
10628         arg1 = cpu_untagged_addr(cpu, arg1);
10629         {
10630             TaskState *ts = get_task_state(cpu);
10631             /* Special hack to detect libc making the stack executable.  */
10632             if ((arg3 & PROT_GROWSDOWN)
10633                 && arg1 >= ts->info->stack_limit
10634                 && arg1 <= ts->info->start_stack) {
10635                 arg3 &= ~PROT_GROWSDOWN;
10636                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10637                 arg1 = ts->info->stack_limit;
10638             }
10639         }
10640         return get_errno(target_mprotect(arg1, arg2, arg3));
10641 #ifdef TARGET_NR_mremap
10642     case TARGET_NR_mremap:
10643         arg1 = cpu_untagged_addr(cpu, arg1);
10644         /* mremap new_addr (arg5) is always untagged */
10645         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10646 #endif
10647         /* ??? msync/mlock/munlock are broken for softmmu.  */
10648 #ifdef TARGET_NR_msync
10649     case TARGET_NR_msync:
10650         return get_errno(msync(g2h(cpu, arg1), arg2,
10651                                target_to_host_msync_arg(arg3)));
10652 #endif
10653 #ifdef TARGET_NR_mlock
10654     case TARGET_NR_mlock:
10655         return get_errno(mlock(g2h(cpu, arg1), arg2));
10656 #endif
10657 #ifdef TARGET_NR_munlock
10658     case TARGET_NR_munlock:
10659         return get_errno(munlock(g2h(cpu, arg1), arg2));
10660 #endif
10661 #ifdef TARGET_NR_mlockall
10662     case TARGET_NR_mlockall:
10663         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10664 #endif
10665 #ifdef TARGET_NR_munlockall
10666     case TARGET_NR_munlockall:
10667         return get_errno(munlockall());
10668 #endif
10669 #ifdef TARGET_NR_truncate
10670     case TARGET_NR_truncate:
10671         if (!(p = lock_user_string(arg1)))
10672             return -TARGET_EFAULT;
10673         ret = get_errno(truncate(p, arg2));
10674         unlock_user(p, arg1, 0);
10675         return ret;
10676 #endif
10677 #ifdef TARGET_NR_ftruncate
10678     case TARGET_NR_ftruncate:
10679         return get_errno(ftruncate(arg1, arg2));
10680 #endif
10681     case TARGET_NR_fchmod:
10682         return get_errno(fchmod(arg1, arg2));
10683 #if defined(TARGET_NR_fchmodat)
10684     case TARGET_NR_fchmodat:
10685         if (!(p = lock_user_string(arg2)))
10686             return -TARGET_EFAULT;
10687         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10688         unlock_user(p, arg2, 0);
10689         return ret;
10690 #endif
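              /*
               * The biased result below mirrors the raw kernel syscall, which
               * returns 20 - nice so the value is never negative: a nice value
               * of -5 comes back as 25 and 19 comes back as 1.  Alpha instead
               * reports the unbiased value.
               */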
10691     case TARGET_NR_getpriority:
10692         /* Note that negative values are valid for getpriority, so we must
10693            differentiate based on errno settings.  */
10694         errno = 0;
10695         ret = getpriority(arg1, arg2);
10696         if (ret == -1 && errno != 0) {
10697             return -host_to_target_errno(errno);
10698         }
10699 #ifdef TARGET_ALPHA
10700         /* Return value is the unbiased priority.  Signal no error.  */
10701         cpu_env->ir[IR_V0] = 0;
10702 #else
10703         /* Return value is a biased priority to avoid negative numbers.  */
10704         ret = 20 - ret;
10705 #endif
10706         return ret;
10707     case TARGET_NR_setpriority:
10708         return get_errno(setpriority(arg1, arg2, arg3));
10709 #ifdef TARGET_NR_statfs
10710     case TARGET_NR_statfs:
10711         if (!(p = lock_user_string(arg1))) {
10712             return -TARGET_EFAULT;
10713         }
10714         ret = get_errno(statfs(path(p), &stfs));
10715         unlock_user(p, arg1, 0);
10716     convert_statfs:
10717         if (!is_error(ret)) {
10718             struct target_statfs *target_stfs;
10719 
10720             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10721                 return -TARGET_EFAULT;
10722             __put_user(stfs.f_type, &target_stfs->f_type);
10723             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10724             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10725             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10726             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10727             __put_user(stfs.f_files, &target_stfs->f_files);
10728             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10729             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10730             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10731             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10732             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10733 #ifdef _STATFS_F_FLAGS
10734             __put_user(stfs.f_flags, &target_stfs->f_flags);
10735 #else
10736             __put_user(0, &target_stfs->f_flags);
10737 #endif
10738             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10739             unlock_user_struct(target_stfs, arg2, 1);
10740         }
10741         return ret;
10742 #endif
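              /*
               * fstatfs only performs the host call and then jumps back into
               * the statfs conversion above via the convert_statfs label;
               * fstatfs64 and convert_statfs64 below follow the same pattern.
               */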
10743 #ifdef TARGET_NR_fstatfs
10744     case TARGET_NR_fstatfs:
10745         ret = get_errno(fstatfs(arg1, &stfs));
10746         goto convert_statfs;
10747 #endif
10748 #ifdef TARGET_NR_statfs64
10749     case TARGET_NR_statfs64:
10750         if (!(p = lock_user_string(arg1))) {
10751             return -TARGET_EFAULT;
10752         }
10753         ret = get_errno(statfs(path(p), &stfs));
10754         unlock_user(p, arg1, 0);
10755     convert_statfs64:
10756         if (!is_error(ret)) {
10757             struct target_statfs64 *target_stfs;
10758 
10759             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10760                 return -TARGET_EFAULT;
10761             __put_user(stfs.f_type, &target_stfs->f_type);
10762             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10763             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10764             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10765             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10766             __put_user(stfs.f_files, &target_stfs->f_files);
10767             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10768             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10769             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10770             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10771             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10772 #ifdef _STATFS_F_FLAGS
10773             __put_user(stfs.f_flags, &target_stfs->f_flags);
10774 #else
10775             __put_user(0, &target_stfs->f_flags);
10776 #endif
10777             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10778             unlock_user_struct(target_stfs, arg3, 1);
10779         }
10780         return ret;
10781     case TARGET_NR_fstatfs64:
10782         ret = get_errno(fstatfs(arg1, &stfs));
10783         goto convert_statfs64;
10784 #endif
10785 #ifdef TARGET_NR_socketcall
10786     case TARGET_NR_socketcall:
10787         return do_socketcall(arg1, arg2);
10788 #endif
10789 #ifdef TARGET_NR_accept
10790     case TARGET_NR_accept:
10791         return do_accept4(arg1, arg2, arg3, 0);
10792 #endif
10793 #ifdef TARGET_NR_accept4
10794     case TARGET_NR_accept4:
10795         return do_accept4(arg1, arg2, arg3, arg4);
10796 #endif
10797 #ifdef TARGET_NR_bind
10798     case TARGET_NR_bind:
10799         return do_bind(arg1, arg2, arg3);
10800 #endif
10801 #ifdef TARGET_NR_connect
10802     case TARGET_NR_connect:
10803         return do_connect(arg1, arg2, arg3);
10804 #endif
10805 #ifdef TARGET_NR_getpeername
10806     case TARGET_NR_getpeername:
10807         return do_getpeername(arg1, arg2, arg3);
10808 #endif
10809 #ifdef TARGET_NR_getsockname
10810     case TARGET_NR_getsockname:
10811         return do_getsockname(arg1, arg2, arg3);
10812 #endif
10813 #ifdef TARGET_NR_getsockopt
10814     case TARGET_NR_getsockopt:
10815         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10816 #endif
10817 #ifdef TARGET_NR_listen
10818     case TARGET_NR_listen:
10819         return get_errno(listen(arg1, arg2));
10820 #endif
10821 #ifdef TARGET_NR_recv
10822     case TARGET_NR_recv:
10823         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10824 #endif
10825 #ifdef TARGET_NR_recvfrom
10826     case TARGET_NR_recvfrom:
10827         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10828 #endif
10829 #ifdef TARGET_NR_recvmsg
10830     case TARGET_NR_recvmsg:
10831         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10832 #endif
10833 #ifdef TARGET_NR_send
10834     case TARGET_NR_send:
10835         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10836 #endif
10837 #ifdef TARGET_NR_sendmsg
10838     case TARGET_NR_sendmsg:
10839         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10840 #endif
10841 #ifdef TARGET_NR_sendmmsg
10842     case TARGET_NR_sendmmsg:
10843         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10844 #endif
10845 #ifdef TARGET_NR_recvmmsg
10846     case TARGET_NR_recvmmsg:
10847         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10848 #endif
10849 #ifdef TARGET_NR_sendto
10850     case TARGET_NR_sendto:
10851         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10852 #endif
10853 #ifdef TARGET_NR_shutdown
10854     case TARGET_NR_shutdown:
10855         return get_errno(shutdown(arg1, arg2));
10856 #endif
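              /*
               * getrandom is only wired up when the host headers also define
               * __NR_getrandom.  The guest buffer is locked writable and the
               * syscall result is used as the unlock length, so on success
               * only the bytes actually produced are written back.
               */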
10857 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10858     case TARGET_NR_getrandom:
10859         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10860         if (!p) {
10861             return -TARGET_EFAULT;
10862         }
10863         ret = get_errno(getrandom(p, arg2, arg3));
10864         unlock_user(p, arg1, ret);
10865         return ret;
10866 #endif
10867 #ifdef TARGET_NR_socket
10868     case TARGET_NR_socket:
10869         return do_socket(arg1, arg2, arg3);
10870 #endif
10871 #ifdef TARGET_NR_socketpair
10872     case TARGET_NR_socketpair:
10873         return do_socketpair(arg1, arg2, arg3, arg4);
10874 #endif
10875 #ifdef TARGET_NR_setsockopt
10876     case TARGET_NR_setsockopt:
10877         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10878 #endif
10879 #if defined(TARGET_NR_syslog)
10880     case TARGET_NR_syslog:
10881         {
10882             int len = arg2;
10883 
10884             switch (arg1) {
10885             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10886             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10887             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10888             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10889             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10890             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10891             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10892             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10893                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10894             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10895             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10896             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10897                 {
10898                     if (len < 0) {
10899                         return -TARGET_EINVAL;
10900                     }
10901                     if (len == 0) {
10902                         return 0;
10903                     }
10904                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10905                     if (!p) {
10906                         return -TARGET_EFAULT;
10907                     }
10908                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10909                     unlock_user(p, arg2, arg3);
10910                 }
10911                 return ret;
10912             default:
10913                 return -TARGET_EINVAL;
10914             }
10915         }
10916         break;
10917 #endif
10918     case TARGET_NR_setitimer:
10919         {
10920             struct itimerval value, ovalue, *pvalue;
10921 
10922             if (arg2) {
10923                 pvalue = &value;
10924                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10925                     || copy_from_user_timeval(&pvalue->it_value,
10926                                               arg2 + sizeof(struct target_timeval)))
10927                     return -TARGET_EFAULT;
10928             } else {
10929                 pvalue = NULL;
10930             }
10931             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10932             if (!is_error(ret) && arg3) {
10933                 if (copy_to_user_timeval(arg3,
10934                                          &ovalue.it_interval)
10935                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10936                                             &ovalue.it_value))
10937                     return -TARGET_EFAULT;
10938             }
10939         }
10940         return ret;
10941     case TARGET_NR_getitimer:
10942         {
10943             struct itimerval value;
10944 
10945             ret = get_errno(getitimer(arg1, &value));
10946             if (!is_error(ret) && arg2) {
10947                 if (copy_to_user_timeval(arg2,
10948                                          &value.it_interval)
10949                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10950                                             &value.it_value))
10951                     return -TARGET_EFAULT;
10952             }
10953         }
10954         return ret;
10955 #ifdef TARGET_NR_stat
10956     case TARGET_NR_stat:
10957         if (!(p = lock_user_string(arg1))) {
10958             return -TARGET_EFAULT;
10959         }
10960         ret = get_errno(stat(path(p), &st));
10961         unlock_user(p, arg1, 0);
10962         goto do_stat;
10963 #endif
10964 #ifdef TARGET_NR_lstat
10965     case TARGET_NR_lstat:
10966         if (!(p = lock_user_string(arg1))) {
10967             return -TARGET_EFAULT;
10968         }
10969         ret = get_errno(lstat(path(p), &st));
10970         unlock_user(p, arg1, 0);
10971         goto do_stat;
10972 #endif
10973 #ifdef TARGET_NR_fstat
10974     case TARGET_NR_fstat:
10975         {
10976             ret = get_errno(fstat(arg1, &st));
10977 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10978         do_stat:
10979 #endif
10980             if (!is_error(ret)) {
10981                 struct target_stat *target_st;
10982 
10983                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10984                     return -TARGET_EFAULT;
10985                 memset(target_st, 0, sizeof(*target_st));
10986                 __put_user(st.st_dev, &target_st->st_dev);
10987                 __put_user(st.st_ino, &target_st->st_ino);
10988                 __put_user(st.st_mode, &target_st->st_mode);
10989                 __put_user(st.st_uid, &target_st->st_uid);
10990                 __put_user(st.st_gid, &target_st->st_gid);
10991                 __put_user(st.st_nlink, &target_st->st_nlink);
10992                 __put_user(st.st_rdev, &target_st->st_rdev);
10993                 __put_user(st.st_size, &target_st->st_size);
10994                 __put_user(st.st_blksize, &target_st->st_blksize);
10995                 __put_user(st.st_blocks, &target_st->st_blocks);
10996                 __put_user(st.st_atime, &target_st->target_st_atime);
10997                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10998                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10999 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11000                 __put_user(st.st_atim.tv_nsec,
11001                            &target_st->target_st_atime_nsec);
11002                 __put_user(st.st_mtim.tv_nsec,
11003                            &target_st->target_st_mtime_nsec);
11004                 __put_user(st.st_ctim.tv_nsec,
11005                            &target_st->target_st_ctime_nsec);
11006 #endif
11007                 unlock_user_struct(target_st, arg2, 1);
11008             }
11009         }
11010         return ret;
11011 #endif
11012     case TARGET_NR_vhangup:
11013         return get_errno(vhangup());
11014 #ifdef TARGET_NR_syscall
11015     case TARGET_NR_syscall:
11016         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11017                           arg6, arg7, arg8, 0);
11018 #endif
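              /*
               * For wait4 the status word is converted and copied back only
               * when a child was actually reaped (ret != 0), and a failure
               * while converting the rusage result replaces the return value.
               */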
11019 #if defined(TARGET_NR_wait4)
11020     case TARGET_NR_wait4:
11021         {
11022             int status;
11023             abi_long status_ptr = arg2;
11024             struct rusage rusage, *rusage_ptr;
11025             abi_ulong target_rusage = arg4;
11026             abi_long rusage_err;
11027             if (target_rusage)
11028                 rusage_ptr = &rusage;
11029             else
11030                 rusage_ptr = NULL;
11031             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11032             if (!is_error(ret)) {
11033                 if (status_ptr && ret) {
11034                     status = host_to_target_waitstatus(status);
11035                     if (put_user_s32(status, status_ptr))
11036                         return -TARGET_EFAULT;
11037                 }
11038                 if (target_rusage) {
11039                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11040                     if (rusage_err) {
11041                         ret = rusage_err;
11042                     }
11043                 }
11044             }
11045         }
11046         return ret;
11047 #endif
11048 #ifdef TARGET_NR_swapoff
11049     case TARGET_NR_swapoff:
11050         if (!(p = lock_user_string(arg1)))
11051             return -TARGET_EFAULT;
11052         ret = get_errno(swapoff(p));
11053         unlock_user(p, arg1, 0);
11054         return ret;
11055 #endif
11056     case TARGET_NR_sysinfo:
11057         {
11058             struct target_sysinfo *target_value;
11059             struct sysinfo value;
11060             ret = get_errno(sysinfo(&value));
11061             if (!is_error(ret) && arg1)
11062             {
11063                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11064                     return -TARGET_EFAULT;
11065                 __put_user(value.uptime, &target_value->uptime);
11066                 __put_user(value.loads[0], &target_value->loads[0]);
11067                 __put_user(value.loads[1], &target_value->loads[1]);
11068                 __put_user(value.loads[2], &target_value->loads[2]);
11069                 __put_user(value.totalram, &target_value->totalram);
11070                 __put_user(value.freeram, &target_value->freeram);
11071                 __put_user(value.sharedram, &target_value->sharedram);
11072                 __put_user(value.bufferram, &target_value->bufferram);
11073                 __put_user(value.totalswap, &target_value->totalswap);
11074                 __put_user(value.freeswap, &target_value->freeswap);
11075                 __put_user(value.procs, &target_value->procs);
11076                 __put_user(value.totalhigh, &target_value->totalhigh);
11077                 __put_user(value.freehigh, &target_value->freehigh);
11078                 __put_user(value.mem_unit, &target_value->mem_unit);
11079                 unlock_user_struct(target_value, arg1, 1);
11080             }
11081         }
11082         return ret;
11083 #ifdef TARGET_NR_ipc
11084     case TARGET_NR_ipc:
11085         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11086 #endif
11087 #ifdef TARGET_NR_semget
11088     case TARGET_NR_semget:
11089         return get_errno(semget(arg1, arg2, arg3));
11090 #endif
11091 #ifdef TARGET_NR_semop
11092     case TARGET_NR_semop:
11093         return do_semtimedop(arg1, arg2, arg3, 0, false);
11094 #endif
11095 #ifdef TARGET_NR_semtimedop
11096     case TARGET_NR_semtimedop:
11097         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11098 #endif
11099 #ifdef TARGET_NR_semtimedop_time64
11100     case TARGET_NR_semtimedop_time64:
11101         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11102 #endif
11103 #ifdef TARGET_NR_semctl
11104     case TARGET_NR_semctl:
11105         return do_semctl(arg1, arg2, arg3, arg4);
11106 #endif
11107 #ifdef TARGET_NR_msgctl
11108     case TARGET_NR_msgctl:
11109         return do_msgctl(arg1, arg2, arg3);
11110 #endif
11111 #ifdef TARGET_NR_msgget
11112     case TARGET_NR_msgget:
11113         return get_errno(msgget(arg1, arg2));
11114 #endif
11115 #ifdef TARGET_NR_msgrcv
11116     case TARGET_NR_msgrcv:
11117         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11118 #endif
11119 #ifdef TARGET_NR_msgsnd
11120     case TARGET_NR_msgsnd:
11121         return do_msgsnd(arg1, arg2, arg3, arg4);
11122 #endif
11123 #ifdef TARGET_NR_shmget
11124     case TARGET_NR_shmget:
11125         return get_errno(shmget(arg1, arg2, arg3));
11126 #endif
11127 #ifdef TARGET_NR_shmctl
11128     case TARGET_NR_shmctl:
11129         return do_shmctl(arg1, arg2, arg3);
11130 #endif
11131 #ifdef TARGET_NR_shmat
11132     case TARGET_NR_shmat:
11133         return target_shmat(cpu_env, arg1, arg2, arg3);
11134 #endif
11135 #ifdef TARGET_NR_shmdt
11136     case TARGET_NR_shmdt:
11137         return target_shmdt(arg1);
11138 #endif
11139     case TARGET_NR_fsync:
11140         return get_errno(fsync(arg1));
11141     case TARGET_NR_clone:
11142         /* Linux manages to have three different orderings for its
11143          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11144          * match the kernel's CONFIG_CLONE_* settings.
11145          * Microblaze is further special in that it uses a sixth
11146          * implicit argument to clone for the TLS pointer.
11147          */
11148 #if defined(TARGET_MICROBLAZE)
11149         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11150 #elif defined(TARGET_CLONE_BACKWARDS)
11151         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11152 #elif defined(TARGET_CLONE_BACKWARDS2)
11153         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11154 #else
11155         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11156 #endif
11157         return ret;
11158 #ifdef __NR_exit_group
11159         /* new thread calls */
11160     case TARGET_NR_exit_group:
11161         preexit_cleanup(cpu_env, arg1);
11162         return get_errno(exit_group(arg1));
11163 #endif
11164     case TARGET_NR_setdomainname:
11165         if (!(p = lock_user_string(arg1)))
11166             return -TARGET_EFAULT;
11167         ret = get_errno(setdomainname(p, arg2));
11168         unlock_user(p, arg1, 0);
11169         return ret;
11170     case TARGET_NR_uname:
11171         /* no need to transcode because we use the linux syscall */
11172         {
11173             struct new_utsname * buf;
11174 
11175             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11176                 return -TARGET_EFAULT;
11177             ret = get_errno(sys_uname(buf));
11178             if (!is_error(ret)) {
11179                 /* Overwrite the native machine name with whatever is being
11180                    emulated. */
11181                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11182                           sizeof(buf->machine));
11183                 /* Allow the user to override the reported release.  */
11184                 if (qemu_uname_release && *qemu_uname_release) {
11185                     g_strlcpy(buf->release, qemu_uname_release,
11186                               sizeof(buf->release));
11187                 }
11188             }
11189             unlock_user_struct(buf, arg1, 1);
11190         }
11191         return ret;
11192 #ifdef TARGET_I386
11193     case TARGET_NR_modify_ldt:
11194         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11195 #if !defined(TARGET_X86_64)
11196     case TARGET_NR_vm86:
11197         return do_vm86(cpu_env, arg1, arg2);
11198 #endif
11199 #endif
11200 #if defined(TARGET_NR_adjtimex)
11201     case TARGET_NR_adjtimex:
11202         {
11203             struct timex host_buf;
11204 
11205             if (target_to_host_timex(&host_buf, arg1) != 0) {
11206                 return -TARGET_EFAULT;
11207             }
11208             ret = get_errno(adjtimex(&host_buf));
11209             if (!is_error(ret)) {
11210                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11211                     return -TARGET_EFAULT;
11212                 }
11213             }
11214         }
11215         return ret;
11216 #endif
11217 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11218     case TARGET_NR_clock_adjtime:
11219         {
11220             struct timex htx;
11221 
11222             if (target_to_host_timex(&htx, arg2) != 0) {
11223                 return -TARGET_EFAULT;
11224             }
11225             ret = get_errno(clock_adjtime(arg1, &htx));
11226             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11227                 return -TARGET_EFAULT;
11228             }
11229         }
11230         return ret;
11231 #endif
11232 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11233     case TARGET_NR_clock_adjtime64:
11234         {
11235             struct timex htx;
11236 
11237             if (target_to_host_timex64(&htx, arg2) != 0) {
11238                 return -TARGET_EFAULT;
11239             }
11240             ret = get_errno(clock_adjtime(arg1, &htx));
11241             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11242                 return -TARGET_EFAULT;
11243             }
11244         }
11245         return ret;
11246 #endif
11247     case TARGET_NR_getpgid:
11248         return get_errno(getpgid(arg1));
11249     case TARGET_NR_fchdir:
11250         return get_errno(fchdir(arg1));
11251     case TARGET_NR_personality:
11252         return get_errno(personality(arg1));
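              /*
               * _llseek receives the 64-bit offset as two 32-bit halves.  On
               * hosts without __NR_llseek (64-bit hosts) it is emulated with a
               * plain lseek(), and on success the resulting offset is stored
               * through the guest's 64-bit result pointer (arg4).
               */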
11253 #ifdef TARGET_NR__llseek /* Not on alpha */
11254     case TARGET_NR__llseek:
11255         {
11256             int64_t res;
11257 #if !defined(__NR_llseek)
11258             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11259             if (res == -1) {
11260                 ret = get_errno(res);
11261             } else {
11262                 ret = 0;
11263             }
11264 #else
11265             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11266 #endif
11267             if ((ret == 0) && put_user_s64(res, arg4)) {
11268                 return -TARGET_EFAULT;
11269             }
11270         }
11271         return ret;
11272 #endif
11273 #ifdef TARGET_NR_getdents
11274     case TARGET_NR_getdents:
11275         return do_getdents(arg1, arg2, arg3);
11276 #endif /* TARGET_NR_getdents */
11277 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11278     case TARGET_NR_getdents64:
11279         return do_getdents64(arg1, arg2, arg3);
11280 #endif /* TARGET_NR_getdents64 */
11281 #if defined(TARGET_NR__newselect)
11282     case TARGET_NR__newselect:
11283         return do_select(arg1, arg2, arg3, arg4, arg5);
11284 #endif
11285 #ifdef TARGET_NR_poll
11286     case TARGET_NR_poll:
11287         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11288 #endif
11289 #ifdef TARGET_NR_ppoll
11290     case TARGET_NR_ppoll:
11291         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11292 #endif
11293 #ifdef TARGET_NR_ppoll_time64
11294     case TARGET_NR_ppoll_time64:
11295         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11296 #endif
11297     case TARGET_NR_flock:
11298         /* NOTE: the flock constant seems to be the same for every
11299            Linux platform */
11300         return get_errno(safe_flock(arg1, arg2));
11301     case TARGET_NR_readv:
11302         {
11303             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11304             if (vec != NULL) {
11305                 ret = get_errno(safe_readv(arg1, vec, arg3));
11306                 unlock_iovec(vec, arg2, arg3, 1);
11307             } else {
11308                 ret = -host_to_target_errno(errno);
11309             }
11310         }
11311         return ret;
11312     case TARGET_NR_writev:
11313         {
11314             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11315             if (vec != NULL) {
11316                 ret = get_errno(safe_writev(arg1, vec, arg3));
11317                 unlock_iovec(vec, arg2, arg3, 0);
11318             } else {
11319                 ret = -host_to_target_errno(errno);
11320             }
11321         }
11322         return ret;
11323 #if defined(TARGET_NR_preadv)
11324     case TARGET_NR_preadv:
11325         {
11326             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11327             if (vec != NULL) {
11328                 unsigned long low, high;
11329 
11330                 target_to_host_low_high(arg4, arg5, &low, &high);
11331                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11332                 unlock_iovec(vec, arg2, arg3, 1);
11333             } else {
11334                 ret = -host_to_target_errno(errno);
11335             }
11336         }
11337         return ret;
11338 #endif
11339 #if defined(TARGET_NR_pwritev)
11340     case TARGET_NR_pwritev:
11341         {
11342             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11343             if (vec != NULL) {
11344                 unsigned long low, high;
11345 
11346                 target_to_host_low_high(arg4, arg5, &low, &high);
11347                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11348                 unlock_iovec(vec, arg2, arg3, 0);
11349             } else {
11350                 ret = -host_to_target_errno(errno);
11351             }
11352         }
11353         return ret;
11354 #endif
11355     case TARGET_NR_getsid:
11356         return get_errno(getsid(arg1));
11357 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11358     case TARGET_NR_fdatasync:
11359         return get_errno(fdatasync(arg1));
11360 #endif
11361     case TARGET_NR_sched_getaffinity:
11362         {
11363             unsigned int mask_size;
11364             unsigned long *mask;
11365 
11366             /*
11367              * sched_getaffinity needs multiples of ulong, so need to take
11368              * care of mismatches between target ulong and host ulong sizes.
11369              */
11370             if (arg2 & (sizeof(abi_ulong) - 1)) {
11371                 return -TARGET_EINVAL;
11372             }
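                  /*
                   * Round the guest-supplied size up to a whole number of host
                   * longs: e.g. arg2 == 12 with 8-byte host longs gives a
                   * mask_size of 16.
                   */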
11373             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11374 
11375             mask = alloca(mask_size);
11376             memset(mask, 0, mask_size);
11377             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11378 
11379             if (!is_error(ret)) {
11380                 if (ret > arg2) {
11381                     /* More data returned than the caller's buffer will fit.
11382                      * This only happens if sizeof(abi_long) < sizeof(long)
11383                      * and the caller passed us a buffer holding an odd number
11384                      * of abi_longs. If the host kernel is actually using the
11385                      * extra 4 bytes then fail EINVAL; otherwise we can just
11386                      * ignore them and only copy the interesting part.
11387                      */
11388                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11389                     if (numcpus > arg2 * 8) {
11390                         return -TARGET_EINVAL;
11391                     }
11392                     ret = arg2;
11393                 }
11394 
11395                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11396                     return -TARGET_EFAULT;
11397                 }
11398             }
11399         }
11400         return ret;
11401     case TARGET_NR_sched_setaffinity:
11402         {
11403             unsigned int mask_size;
11404             unsigned long *mask;
11405 
11406             /*
11407              * sched_setaffinity needs multiples of ulong, so need to take
11408              * care of mismatches between target ulong and host ulong sizes.
11409              */
11410             if (arg2 & (sizeof(abi_ulong) - 1)) {
11411                 return -TARGET_EINVAL;
11412             }
11413             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11414             mask = alloca(mask_size);
11415 
11416             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11417             if (ret) {
11418                 return ret;
11419             }
11420 
11421             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11422         }
11423     case TARGET_NR_getcpu:
11424         {
11425             unsigned cpuid, node;
11426             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11427                                        arg2 ? &node : NULL,
11428                                        NULL));
11429             if (is_error(ret)) {
11430                 return ret;
11431             }
11432             if (arg1 && put_user_u32(cpuid, arg1)) {
11433                 return -TARGET_EFAULT;
11434             }
11435             if (arg2 && put_user_u32(node, arg2)) {
11436                 return -TARGET_EFAULT;
11437             }
11438         }
11439         return ret;
11440     case TARGET_NR_sched_setparam:
11441         {
11442             struct target_sched_param *target_schp;
11443             struct sched_param schp;
11444 
11445             if (arg2 == 0) {
11446                 return -TARGET_EINVAL;
11447             }
11448             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11449                 return -TARGET_EFAULT;
11450             }
11451             schp.sched_priority = tswap32(target_schp->sched_priority);
11452             unlock_user_struct(target_schp, arg2, 0);
11453             return get_errno(sys_sched_setparam(arg1, &schp));
11454         }
11455     case TARGET_NR_sched_getparam:
11456         {
11457             struct target_sched_param *target_schp;
11458             struct sched_param schp;
11459 
11460             if (arg2 == 0) {
11461                 return -TARGET_EINVAL;
11462             }
11463             ret = get_errno(sys_sched_getparam(arg1, &schp));
11464             if (!is_error(ret)) {
11465                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11466                     return -TARGET_EFAULT;
11467                 }
11468                 target_schp->sched_priority = tswap32(schp.sched_priority);
11469                 unlock_user_struct(target_schp, arg2, 1);
11470             }
11471         }
11472         return ret;
11473     case TARGET_NR_sched_setscheduler:
11474         {
11475             struct target_sched_param *target_schp;
11476             struct sched_param schp;
11477             if (arg3 == 0) {
11478                 return -TARGET_EINVAL;
11479             }
11480             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11481                 return -TARGET_EFAULT;
11482             }
11483             schp.sched_priority = tswap32(target_schp->sched_priority);
11484             unlock_user_struct(target_schp, arg3, 0);
11485             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11486         }
11487     case TARGET_NR_sched_getscheduler:
11488         return get_errno(sys_sched_getscheduler(arg1));
11489     case TARGET_NR_sched_getattr:
11490         {
11491             struct target_sched_attr *target_scha;
11492             struct sched_attr scha;
11493             if (arg2 == 0) {
11494                 return -TARGET_EINVAL;
11495             }
11496             if (arg3 > sizeof(scha)) {
11497                 arg3 = sizeof(scha);
11498             }
11499             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11500             if (!is_error(ret)) {
11501                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11502                 if (!target_scha) {
11503                     return -TARGET_EFAULT;
11504                 }
11505                 target_scha->size = tswap32(scha.size);
11506                 target_scha->sched_policy = tswap32(scha.sched_policy);
11507                 target_scha->sched_flags = tswap64(scha.sched_flags);
11508                 target_scha->sched_nice = tswap32(scha.sched_nice);
11509                 target_scha->sched_priority = tswap32(scha.sched_priority);
11510                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11511                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11512                 target_scha->sched_period = tswap64(scha.sched_period);
11513                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11514                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11515                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11516                 }
11517                 unlock_user(target_scha, arg2, arg3);
11518             }
11519             return ret;
11520         }
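              /*
               * sched_setattr follows the kernel's extensible-struct rules: a
               * zero size means the original layout (up to sched_util_min),
               * anything smaller than that is rejected with E2BIG and the
               * supported size written back, and a larger struct is accepted
               * only if the bytes beyond the fields known here are zero, after
               * which the size is clamped.
               */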
11521     case TARGET_NR_sched_setattr:
11522         {
11523             struct target_sched_attr *target_scha;
11524             struct sched_attr scha;
11525             uint32_t size;
11526             int zeroed;
11527             if (arg2 == 0) {
11528                 return -TARGET_EINVAL;
11529             }
11530             if (get_user_u32(size, arg2)) {
11531                 return -TARGET_EFAULT;
11532             }
11533             if (!size) {
11534                 size = offsetof(struct target_sched_attr, sched_util_min);
11535             }
11536             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11537                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11538                     return -TARGET_EFAULT;
11539                 }
11540                 return -TARGET_E2BIG;
11541             }
11542 
11543             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11544             if (zeroed < 0) {
11545                 return zeroed;
11546             } else if (zeroed == 0) {
11547                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11548                     return -TARGET_EFAULT;
11549                 }
11550                 return -TARGET_E2BIG;
11551             }
11552             if (size > sizeof(struct target_sched_attr)) {
11553                 size = sizeof(struct target_sched_attr);
11554             }
11555 
11556             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11557             if (!target_scha) {
11558                 return -TARGET_EFAULT;
11559             }
11560             scha.size = size;
11561             scha.sched_policy = tswap32(target_scha->sched_policy);
11562             scha.sched_flags = tswap64(target_scha->sched_flags);
11563             scha.sched_nice = tswap32(target_scha->sched_nice);
11564             scha.sched_priority = tswap32(target_scha->sched_priority);
11565             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11566             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11567             scha.sched_period = tswap64(target_scha->sched_period);
11568             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11569                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11570                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11571             }
11572             unlock_user(target_scha, arg2, 0);
11573             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11574         }
11575     case TARGET_NR_sched_yield:
11576         return get_errno(sched_yield());
11577     case TARGET_NR_sched_get_priority_max:
11578         return get_errno(sched_get_priority_max(arg1));
11579     case TARGET_NR_sched_get_priority_min:
11580         return get_errno(sched_get_priority_min(arg1));
11581 #ifdef TARGET_NR_sched_rr_get_interval
11582     case TARGET_NR_sched_rr_get_interval:
11583         {
11584             struct timespec ts;
11585             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11586             if (!is_error(ret)) {
11587                 ret = host_to_target_timespec(arg2, &ts);
11588             }
11589         }
11590         return ret;
11591 #endif
11592 #ifdef TARGET_NR_sched_rr_get_interval_time64
11593     case TARGET_NR_sched_rr_get_interval_time64:
11594         {
11595             struct timespec ts;
11596             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11597             if (!is_error(ret)) {
11598                 ret = host_to_target_timespec64(arg2, &ts);
11599             }
11600         }
11601         return ret;
11602 #endif
11603 #if defined(TARGET_NR_nanosleep)
11604     case TARGET_NR_nanosleep:
11605         {
11606             struct timespec req, rem;
11607             target_to_host_timespec(&req, arg1);
11608             ret = get_errno(safe_nanosleep(&req, &rem));
11609             if (is_error(ret) && arg2) {
11610                 host_to_target_timespec(arg2, &rem);
11611             }
11612         }
11613         return ret;
11614 #endif
11615     case TARGET_NR_prctl:
11616         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11618 #ifdef TARGET_NR_arch_prctl
11619     case TARGET_NR_arch_prctl:
11620         return do_arch_prctl(cpu_env, arg1, arg2);
11621 #endif
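              /*
               * On ABIs where 64-bit syscall arguments must sit in an aligned
               * register pair (regpairs_aligned()), the offset starts one slot
               * later, so arg4/arg5 are taken from arg5/arg6 before being
               * recombined with target_offset64().
               */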
11622 #ifdef TARGET_NR_pread64
11623     case TARGET_NR_pread64:
11624         if (regpairs_aligned(cpu_env, num)) {
11625             arg4 = arg5;
11626             arg5 = arg6;
11627         }
11628         if (arg2 == 0 && arg3 == 0) {
11629             /* Special-case NULL buffer and zero length, which should succeed */
11630             p = 0;
11631         } else {
11632             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11633             if (!p) {
11634                 return -TARGET_EFAULT;
11635             }
11636         }
11637         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11638         unlock_user(p, arg2, ret);
11639         return ret;
11640     case TARGET_NR_pwrite64:
11641         if (regpairs_aligned(cpu_env, num)) {
11642             arg4 = arg5;
11643             arg5 = arg6;
11644         }
11645         if (arg2 == 0 && arg3 == 0) {
11646             /* Special-case NULL buffer and zero length, which should succeed */
11647             p = 0;
11648         } else {
11649             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11650             if (!p) {
11651                 return -TARGET_EFAULT;
11652             }
11653         }
11654         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11655         unlock_user(p, arg2, 0);
11656         return ret;
11657 #endif
11658     case TARGET_NR_getcwd:
11659         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11660             return -TARGET_EFAULT;
11661         ret = get_errno(sys_getcwd1(p, arg2));
11662         unlock_user(p, arg1, ret);
11663         return ret;
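              /*
               * capget and capset share one implementation.  A header version
               * other than the original _LINUX_CAPABILITY_VERSION implies two
               * __user_cap_data_struct entries rather than one, and since the
               * kernel writes its preferred version back into the header for
               * both calls, the header is always locked for writing.
               */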
11664     case TARGET_NR_capget:
11665     case TARGET_NR_capset:
11666     {
11667         struct target_user_cap_header *target_header;
11668         struct target_user_cap_data *target_data = NULL;
11669         struct __user_cap_header_struct header;
11670         struct __user_cap_data_struct data[2];
11671         struct __user_cap_data_struct *dataptr = NULL;
11672         int i, target_datalen;
11673         int data_items = 1;
11674 
11675         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11676             return -TARGET_EFAULT;
11677         }
11678         header.version = tswap32(target_header->version);
11679         header.pid = tswap32(target_header->pid);
11680 
11681         if (header.version != _LINUX_CAPABILITY_VERSION) {
11682             /* Version 2 and up takes pointer to two user_data structs */
11683             data_items = 2;
11684         }
11685 
11686         target_datalen = sizeof(*target_data) * data_items;
11687 
11688         if (arg2) {
11689             if (num == TARGET_NR_capget) {
11690                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11691             } else {
11692                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11693             }
11694             if (!target_data) {
11695                 unlock_user_struct(target_header, arg1, 0);
11696                 return -TARGET_EFAULT;
11697             }
11698 
11699             if (num == TARGET_NR_capset) {
11700                 for (i = 0; i < data_items; i++) {
11701                     data[i].effective = tswap32(target_data[i].effective);
11702                     data[i].permitted = tswap32(target_data[i].permitted);
11703                     data[i].inheritable = tswap32(target_data[i].inheritable);
11704                 }
11705             }
11706 
11707             dataptr = data;
11708         }
11709 
11710         if (num == TARGET_NR_capget) {
11711             ret = get_errno(capget(&header, dataptr));
11712         } else {
11713             ret = get_errno(capset(&header, dataptr));
11714         }
11715 
11716         /* The kernel always updates version for both capget and capset */
11717         target_header->version = tswap32(header.version);
11718         unlock_user_struct(target_header, arg1, 1);
11719 
11720         if (arg2) {
11721             if (num == TARGET_NR_capget) {
11722                 for (i = 0; i < data_items; i++) {
11723                     target_data[i].effective = tswap32(data[i].effective);
11724                     target_data[i].permitted = tswap32(data[i].permitted);
11725                     target_data[i].inheritable = tswap32(data[i].inheritable);
11726                 }
11727                 unlock_user(target_data, arg2, target_datalen);
11728             } else {
11729                 unlock_user(target_data, arg2, 0);
11730             }
11731         }
11732         return ret;
11733     }
11734     case TARGET_NR_sigaltstack:
11735         return do_sigaltstack(arg1, arg2, cpu_env);
11736 
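              /*
               * The sendfile variants read the optional guest offset (an
               * abi_long for sendfile, a 64-bit value for sendfile64), pass
               * the host a local off_t, and on success write the updated
               * offset back to the guest.
               */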
11737 #ifdef CONFIG_SENDFILE
11738 #ifdef TARGET_NR_sendfile
11739     case TARGET_NR_sendfile:
11740     {
11741         off_t *offp = NULL;
11742         off_t off;
11743         if (arg3) {
11744             ret = get_user_sal(off, arg3);
11745             if (is_error(ret)) {
11746                 return ret;
11747             }
11748             offp = &off;
11749         }
11750         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11751         if (!is_error(ret) && arg3) {
11752             abi_long ret2 = put_user_sal(off, arg3);
11753             if (is_error(ret2)) {
11754                 ret = ret2;
11755             }
11756         }
11757         return ret;
11758     }
11759 #endif
11760 #ifdef TARGET_NR_sendfile64
11761     case TARGET_NR_sendfile64:
11762     {
11763         off_t *offp = NULL;
11764         off_t off;
11765         if (arg3) {
11766             ret = get_user_s64(off, arg3);
11767             if (is_error(ret)) {
11768                 return ret;
11769             }
11770             offp = &off;
11771         }
11772         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11773         if (!is_error(ret) && arg3) {
11774             abi_long ret2 = put_user_s64(off, arg3);
11775             if (is_error(ret2)) {
11776                 ret = ret2;
11777             }
11778         }
11779         return ret;
11780     }
11781 #endif
11782 #endif
11783 #ifdef TARGET_NR_vfork
11784     case TARGET_NR_vfork:
11785         return get_errno(do_fork(cpu_env,
11786                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11787                          0, 0, 0, 0));
11788 #endif
11789 #ifdef TARGET_NR_ugetrlimit
11790     case TARGET_NR_ugetrlimit:
11791     {
11792         struct rlimit rlim;
11793         int resource = target_to_host_resource(arg1);
11794         ret = get_errno(getrlimit(resource, &rlim));
11795         if (!is_error(ret)) {
11796             struct target_rlimit *target_rlim;
11797             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11798                 return -TARGET_EFAULT;
11799             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11800             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11801             unlock_user_struct(target_rlim, arg2, 1);
11802         }
11803         return ret;
11804     }
11805 #endif
11806 #ifdef TARGET_NR_truncate64
11807     case TARGET_NR_truncate64:
11808         if (!(p = lock_user_string(arg1)))
11809             return -TARGET_EFAULT;
11810         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11811         unlock_user(p, arg1, 0);
11812         return ret;
11813 #endif
11814 #ifdef TARGET_NR_ftruncate64
11815     case TARGET_NR_ftruncate64:
11816         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11817 #endif
11818 #ifdef TARGET_NR_stat64
11819     case TARGET_NR_stat64:
11820         if (!(p = lock_user_string(arg1))) {
11821             return -TARGET_EFAULT;
11822         }
11823         ret = get_errno(stat(path(p), &st));
11824         unlock_user(p, arg1, 0);
11825         if (!is_error(ret))
11826             ret = host_to_target_stat64(cpu_env, arg2, &st);
11827         return ret;
11828 #endif
11829 #ifdef TARGET_NR_lstat64
11830     case TARGET_NR_lstat64:
11831         if (!(p = lock_user_string(arg1))) {
11832             return -TARGET_EFAULT;
11833         }
11834         ret = get_errno(lstat(path(p), &st));
11835         unlock_user(p, arg1, 0);
11836         if (!is_error(ret))
11837             ret = host_to_target_stat64(cpu_env, arg2, &st);
11838         return ret;
11839 #endif
11840 #ifdef TARGET_NR_fstat64
11841     case TARGET_NR_fstat64:
11842         ret = get_errno(fstat(arg1, &st));
11843         if (!is_error(ret))
11844             ret = host_to_target_stat64(cpu_env, arg2, &st);
11845         return ret;
11846 #endif
11847 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11848 #ifdef TARGET_NR_fstatat64
11849     case TARGET_NR_fstatat64:
11850 #endif
11851 #ifdef TARGET_NR_newfstatat
11852     case TARGET_NR_newfstatat:
11853 #endif
11854         if (!(p = lock_user_string(arg2))) {
11855             return -TARGET_EFAULT;
11856         }
11857         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11858         unlock_user(p, arg2, 0);
11859         if (!is_error(ret))
11860             ret = host_to_target_stat64(cpu_env, arg3, &st);
11861         return ret;
11862 #endif
11863 #if defined(TARGET_NR_statx)
11864     case TARGET_NR_statx:
11865         {
11866             struct target_statx *target_stx;
11867             int dirfd = arg1;
11868             int flags = arg3;
11869 
11870             p = lock_user_string(arg2);
11871             if (p == NULL) {
11872                 return -TARGET_EFAULT;
11873             }
11874 #if defined(__NR_statx)
11875             {
11876                 /*
11877                  * It is assumed that struct statx is architecture independent.
11878                  */
11879                 struct target_statx host_stx;
11880                 int mask = arg4;
11881 
11882                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11883                 if (!is_error(ret)) {
11884                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11885                         unlock_user(p, arg2, 0);
11886                         return -TARGET_EFAULT;
11887                     }
11888                 }
11889 
11890                 if (ret != -TARGET_ENOSYS) {
11891                     unlock_user(p, arg2, 0);
11892                     return ret;
11893                 }
11894             }
11895 #endif
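                  /*
                   * No usable host statx(): fall back to fstatat() and fill
                   * in the statx fields a plain struct stat can provide.
                   */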
11896             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11897             unlock_user(p, arg2, 0);
11898 
11899             if (!is_error(ret)) {
11900                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11901                     return -TARGET_EFAULT;
11902                 }
11903                 memset(target_stx, 0, sizeof(*target_stx));
11904                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11905                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11906                 __put_user(st.st_ino, &target_stx->stx_ino);
11907                 __put_user(st.st_mode, &target_stx->stx_mode);
11908                 __put_user(st.st_uid, &target_stx->stx_uid);
11909                 __put_user(st.st_gid, &target_stx->stx_gid);
11910                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11911                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11912                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11913                 __put_user(st.st_size, &target_stx->stx_size);
11914                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11915                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11916                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11917                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11918                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11919                 unlock_user_struct(target_stx, arg5, 1);
11920             }
11921         }
11922         return ret;
11923 #endif
11924 #ifdef TARGET_NR_lchown
11925     case TARGET_NR_lchown:
11926         if (!(p = lock_user_string(arg1)))
11927             return -TARGET_EFAULT;
11928         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11929         unlock_user(p, arg1, 0);
11930         return ret;
11931 #endif
11932 #ifdef TARGET_NR_getuid
11933     case TARGET_NR_getuid:
11934         return get_errno(high2lowuid(getuid()));
11935 #endif
11936 #ifdef TARGET_NR_getgid
11937     case TARGET_NR_getgid:
11938         return get_errno(high2lowgid(getgid()));
11939 #endif
11940 #ifdef TARGET_NR_geteuid
11941     case TARGET_NR_geteuid:
11942         return get_errno(high2lowuid(geteuid()));
11943 #endif
11944 #ifdef TARGET_NR_getegid
11945     case TARGET_NR_getegid:
11946         return get_errno(high2lowgid(getegid()));
11947 #endif
11948     case TARGET_NR_setreuid:
11949         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11950     case TARGET_NR_setregid:
11951         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11952     case TARGET_NR_getgroups:
11953         { /* the same code as for TARGET_NR_getgroups32 */
11954             int gidsetsize = arg1;
11955             target_id *target_grouplist;
11956             g_autofree gid_t *grouplist = NULL;
11957             int i;
11958 
11959             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11960                 return -TARGET_EINVAL;
11961             }
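                  /* gidsetsize == 0 just queries the group count. */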
11962             if (gidsetsize > 0) {
11963                 grouplist = g_try_new(gid_t, gidsetsize);
11964                 if (!grouplist) {
11965                     return -TARGET_ENOMEM;
11966                 }
11967             }
11968             ret = get_errno(getgroups(gidsetsize, grouplist));
11969             if (!is_error(ret) && gidsetsize > 0) {
11970                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11971                                              gidsetsize * sizeof(target_id), 0);
11972                 if (!target_grouplist) {
11973                     return -TARGET_EFAULT;
11974                 }
11975                 for (i = 0; i < ret; i++) {
11976                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11977                 }
11978                 unlock_user(target_grouplist, arg2,
11979                             gidsetsize * sizeof(target_id));
11980             }
11981             return ret;
11982         }
11983     case TARGET_NR_setgroups:
11984         { /* the same code as for TARGET_NR_setgroups32 */
11985             int gidsetsize = arg1;
11986             target_id *target_grouplist;
11987             g_autofree gid_t *grouplist = NULL;
11988             int i;
11989 
11990             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11991                 return -TARGET_EINVAL;
11992             }
11993             if (gidsetsize > 0) {
11994                 grouplist = g_try_new(gid_t, gidsetsize);
11995                 if (!grouplist) {
11996                     return -TARGET_ENOMEM;
11997                 }
11998                 target_grouplist = lock_user(VERIFY_READ, arg2,
11999                                              gidsetsize * sizeof(target_id), 1);
12000                 if (!target_grouplist) {
12001                     return -TARGET_EFAULT;
12002                 }
12003                 for (i = 0; i < gidsetsize; i++) {
12004                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12005                 }
12006                 unlock_user(target_grouplist, arg2,
12007                             gidsetsize * sizeof(target_id));
12008             }
12009             return get_errno(sys_setgroups(gidsetsize, grouplist));
12010         }
12011     case TARGET_NR_fchown:
12012         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12013 #if defined(TARGET_NR_fchownat)
12014     case TARGET_NR_fchownat:
12015         if (!(p = lock_user_string(arg2)))
12016             return -TARGET_EFAULT;
12017         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12018                                  low2highgid(arg4), arg5));
12019         unlock_user(p, arg2, 0);
12020         return ret;
12021 #endif
12022 #ifdef TARGET_NR_setresuid
12023     case TARGET_NR_setresuid:
12024         return get_errno(sys_setresuid(low2highuid(arg1),
12025                                        low2highuid(arg2),
12026                                        low2highuid(arg3)));
12027 #endif
12028 #ifdef TARGET_NR_getresuid
12029     case TARGET_NR_getresuid:
12030         {
12031             uid_t ruid, euid, suid;
12032             ret = get_errno(getresuid(&ruid, &euid, &suid));
12033             if (!is_error(ret)) {
12034                 if (put_user_id(high2lowuid(ruid), arg1)
12035                     || put_user_id(high2lowuid(euid), arg2)
12036                     || put_user_id(high2lowuid(suid), arg3))
12037                     return -TARGET_EFAULT;
12038             }
12039         }
12040         return ret;
12041 #endif
12042 #ifdef TARGET_NR_getresgid
12043     case TARGET_NR_setresgid:
12044         return get_errno(sys_setresgid(low2highgid(arg1),
12045                                        low2highgid(arg2),
12046                                        low2highgid(arg3)));
12047 #endif
12048 #ifdef TARGET_NR_getresgid
12049     case TARGET_NR_getresgid:
12050         {
12051             gid_t rgid, egid, sgid;
12052             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12053             if (!is_error(ret)) {
12054                 if (put_user_id(high2lowgid(rgid), arg1)
12055                     || put_user_id(high2lowgid(egid), arg2)
12056                     || put_user_id(high2lowgid(sgid), arg3))
12057                     return -TARGET_EFAULT;
12058             }
12059         }
12060         return ret;
12061 #endif
12062 #ifdef TARGET_NR_chown
12063     case TARGET_NR_chown:
12064         if (!(p = lock_user_string(arg1)))
12065             return -TARGET_EFAULT;
12066         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12067         unlock_user(p, arg1, 0);
12068         return ret;
12069 #endif
12070     case TARGET_NR_setuid:
12071         return get_errno(sys_setuid(low2highuid(arg1)));
12072     case TARGET_NR_setgid:
12073         return get_errno(sys_setgid(low2highgid(arg1)));
12074     case TARGET_NR_setfsuid:
12075         return get_errno(setfsuid(arg1));
12076     case TARGET_NR_setfsgid:
12077         return get_errno(setfsgid(arg1));
12078 
12079 #ifdef TARGET_NR_lchown32
12080     case TARGET_NR_lchown32:
12081         if (!(p = lock_user_string(arg1)))
12082             return -TARGET_EFAULT;
12083         ret = get_errno(lchown(p, arg2, arg3));
12084         unlock_user(p, arg1, 0);
12085         return ret;
12086 #endif
12087 #ifdef TARGET_NR_getuid32
12088     case TARGET_NR_getuid32:
12089         return get_errno(getuid());
12090 #endif
12091 
12092 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12093     /* Alpha specific */
12094     case TARGET_NR_getxuid:
12095         {
12096             uid_t euid;
12097             euid = geteuid();
12098             cpu_env->ir[IR_A4] = euid;
12099         }
12100         return get_errno(getuid());
12101 #endif
12102 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12103     /* Alpha specific */
12104     case TARGET_NR_getxgid:
12105         {
12106             gid_t egid;
12107             egid = getegid();
12108             cpu_env->ir[IR_A4] = egid;
12109         }
12110         return get_errno(getgid());
12111 #endif
12112 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12113     /* Alpha specific */
12114     case TARGET_NR_osf_getsysinfo:
12115         ret = -TARGET_EOPNOTSUPP;
12116         switch (arg1) {
12117           case TARGET_GSI_IEEE_FP_CONTROL:
12118             {
12119                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12120                 uint64_t swcr = cpu_env->swcr;
12121 
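                      /*
                       * Fold the live exception status bits from the FPCR
                       * into the saved software completion register value.
                       */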
12122                 swcr &= ~SWCR_STATUS_MASK;
12123                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12124 
12125                 if (put_user_u64(swcr, arg2))
12126                     return -TARGET_EFAULT;
12127                 ret = 0;
12128             }
12129             break;
12130 
12131           /* case GSI_IEEE_STATE_AT_SIGNAL:
12132              -- Not implemented in linux kernel.
12133              case GSI_UACPROC:
12134              -- Retrieves current unaligned access state; not much used.
12135              case GSI_PROC_TYPE:
12136              -- Retrieves implver information; surely not used.
12137              case GSI_GET_HWRPB:
12138              -- Grabs a copy of the HWRPB; surely not used.
12139           */
12140         }
12141         return ret;
12142 #endif
12143 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12144     /* Alpha specific */
12145     case TARGET_NR_osf_setsysinfo:
12146         ret = -TARGET_EOPNOTSUPP;
12147         switch (arg1) {
12148           case TARGET_SSI_IEEE_FP_CONTROL:
12149             {
12150                 uint64_t swcr, fpcr;
12151 
12152                 if (get_user_u64(swcr, arg2)) {
12153                     return -TARGET_EFAULT;
12154                 }
12155 
12156                 /*
12157                  * The kernel calls swcr_update_status to update the
12158                  * status bits from the fpcr at every point that it
12159                  * could be queried.  Therefore, we store the status
12160                  * bits only in FPCR.
12161                  */
12162                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12163 
12164                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12165                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12166                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12167                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12168                 ret = 0;
12169             }
12170             break;
12171 
12172           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12173             {
12174                 uint64_t exc, fpcr, fex;
12175 
12176                 if (get_user_u64(exc, arg2)) {
12177                     return -TARGET_EFAULT;
12178                 }
12179                 exc &= SWCR_STATUS_MASK;
12180                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12181 
12182                 /* Old exceptions are not signaled.  */
12183                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12184                 fex = exc & ~fex;
12185                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12186                 fex &= (cpu_env)->swcr;
12187 
12188                 /* Update the hardware fpcr.  */
12189                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12190                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12191 
12192                 if (fex) {
12193                     int si_code = TARGET_FPE_FLTUNK;
12194                     target_siginfo_t info;
12195 
12196                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12197                         si_code = TARGET_FPE_FLTUND;
12198                     }
12199                     if (fex & SWCR_TRAP_ENABLE_INE) {
12200                         si_code = TARGET_FPE_FLTRES;
12201                     }
12202                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12203                         si_code = TARGET_FPE_FLTUND;
12204                     }
12205                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12206                         si_code = TARGET_FPE_FLTOVF;
12207                     }
12208                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12209                         si_code = TARGET_FPE_FLTDIV;
12210                     }
12211                     if (fex & SWCR_TRAP_ENABLE_INV) {
12212                         si_code = TARGET_FPE_FLTINV;
12213                     }
12214 
12215                     info.si_signo = SIGFPE;
12216                     info.si_errno = 0;
12217                     info.si_code = si_code;
12218                     info._sifields._sigfault._addr = (cpu_env)->pc;
12219                     queue_signal(cpu_env, info.si_signo,
12220                                  QEMU_SI_FAULT, &info);
12221                 }
12222                 ret = 0;
12223             }
12224             break;
12225 
12226           /* case SSI_NVPAIRS:
12227              -- Used with SSIN_UACPROC to enable unaligned accesses.
12228              case SSI_IEEE_STATE_AT_SIGNAL:
12229              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12230              -- Not implemented in linux kernel
12231           */
12232         }
12233         return ret;
12234 #endif
12235 #ifdef TARGET_NR_osf_sigprocmask
12236     /* Alpha specific.  */
12237     case TARGET_NR_osf_sigprocmask:
12238         {
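                  /*
                   * OSF/1 sigprocmask passes the mask by value and returns
                   * the old mask as the syscall result, not via a pointer.
                   */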
12239             abi_ulong mask;
12240             int how;
12241             sigset_t set, oldset;
12242 
12243             switch (arg1) {
12244             case TARGET_SIG_BLOCK:
12245                 how = SIG_BLOCK;
12246                 break;
12247             case TARGET_SIG_UNBLOCK:
12248                 how = SIG_UNBLOCK;
12249                 break;
12250             case TARGET_SIG_SETMASK:
12251                 how = SIG_SETMASK;
12252                 break;
12253             default:
12254                 return -TARGET_EINVAL;
12255             }
12256             mask = arg2;
12257             target_to_host_old_sigset(&set, &mask);
12258             ret = do_sigprocmask(how, &set, &oldset);
12259             if (!ret) {
12260                 host_to_target_old_sigset(&mask, &oldset);
12261                 ret = mask;
12262             }
12263         }
12264         return ret;
12265 #endif
12266 
12267 #ifdef TARGET_NR_getgid32
12268     case TARGET_NR_getgid32:
12269         return get_errno(getgid());
12270 #endif
12271 #ifdef TARGET_NR_geteuid32
12272     case TARGET_NR_geteuid32:
12273         return get_errno(geteuid());
12274 #endif
12275 #ifdef TARGET_NR_getegid32
12276     case TARGET_NR_getegid32:
12277         return get_errno(getegid());
12278 #endif
12279 #ifdef TARGET_NR_setreuid32
12280     case TARGET_NR_setreuid32:
12281         return get_errno(sys_setreuid(arg1, arg2));
12282 #endif
12283 #ifdef TARGET_NR_setregid32
12284     case TARGET_NR_setregid32:
12285         return get_errno(sys_setregid(arg1, arg2));
12286 #endif
12287 #ifdef TARGET_NR_getgroups32
12288     case TARGET_NR_getgroups32:
12289         { /* the same code as for TARGET_NR_getgroups */
12290             int gidsetsize = arg1;
12291             uint32_t *target_grouplist;
12292             g_autofree gid_t *grouplist = NULL;
12293             int i;
12294 
12295             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12296                 return -TARGET_EINVAL;
12297             }
12298             if (gidsetsize > 0) {
12299                 grouplist = g_try_new(gid_t, gidsetsize);
12300                 if (!grouplist) {
12301                     return -TARGET_ENOMEM;
12302                 }
12303             }
12304             ret = get_errno(getgroups(gidsetsize, grouplist));
12305             if (!is_error(ret) && gidsetsize > 0) {
12306                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12307                                              gidsetsize * 4, 0);
12308                 if (!target_grouplist) {
12309                     return -TARGET_EFAULT;
12310                 }
12311                 for (i = 0; i < ret; i++) {
12312                     target_grouplist[i] = tswap32(grouplist[i]);
12313                 }
12314                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12315             }
12316             return ret;
12317         }
12318 #endif
12319 #ifdef TARGET_NR_setgroups32
12320     case TARGET_NR_setgroups32:
12321         { /* the same code as for TARGET_NR_setgroups */
12322             int gidsetsize = arg1;
12323             uint32_t *target_grouplist;
12324             g_autofree gid_t *grouplist = NULL;
12325             int i;
12326 
12327             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12328                 return -TARGET_EINVAL;
12329             }
12330             if (gidsetsize > 0) {
12331                 grouplist = g_try_new(gid_t, gidsetsize);
12332                 if (!grouplist) {
12333                     return -TARGET_ENOMEM;
12334                 }
12335                 target_grouplist = lock_user(VERIFY_READ, arg2,
12336                                              gidsetsize * 4, 1);
12337                 if (!target_grouplist) {
12338                     return -TARGET_EFAULT;
12339                 }
12340                 for (i = 0; i < gidsetsize; i++) {
12341                     grouplist[i] = tswap32(target_grouplist[i]);
12342                 }
12343                 unlock_user(target_grouplist, arg2, 0);
12344             }
12345             return get_errno(sys_setgroups(gidsetsize, grouplist));
12346         }
12347 #endif
12348 #ifdef TARGET_NR_fchown32
12349     case TARGET_NR_fchown32:
12350         return get_errno(fchown(arg1, arg2, arg3));
12351 #endif
12352 #ifdef TARGET_NR_setresuid32
12353     case TARGET_NR_setresuid32:
12354         return get_errno(sys_setresuid(arg1, arg2, arg3));
12355 #endif
12356 #ifdef TARGET_NR_getresuid32
12357     case TARGET_NR_getresuid32:
12358         {
12359             uid_t ruid, euid, suid;
12360             ret = get_errno(getresuid(&ruid, &euid, &suid));
12361             if (!is_error(ret)) {
12362                 if (put_user_u32(ruid, arg1)
12363                     || put_user_u32(euid, arg2)
12364                     || put_user_u32(suid, arg3))
12365                     return -TARGET_EFAULT;
12366             }
12367         }
12368         return ret;
12369 #endif
12370 #ifdef TARGET_NR_setresgid32
12371     case TARGET_NR_setresgid32:
12372         return get_errno(sys_setresgid(arg1, arg2, arg3));
12373 #endif
12374 #ifdef TARGET_NR_getresgid32
12375     case TARGET_NR_getresgid32:
12376         {
12377             gid_t rgid, egid, sgid;
12378             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12379             if (!is_error(ret)) {
12380                 if (put_user_u32(rgid, arg1)
12381                     || put_user_u32(egid, arg2)
12382                     || put_user_u32(sgid, arg3))
12383                     return -TARGET_EFAULT;
12384             }
12385         }
12386         return ret;
12387 #endif
12388 #ifdef TARGET_NR_chown32
12389     case TARGET_NR_chown32:
12390         if (!(p = lock_user_string(arg1)))
12391             return -TARGET_EFAULT;
12392         ret = get_errno(chown(p, arg2, arg3));
12393         unlock_user(p, arg1, 0);
12394         return ret;
12395 #endif
12396 #ifdef TARGET_NR_setuid32
12397     case TARGET_NR_setuid32:
12398         return get_errno(sys_setuid(arg1));
12399 #endif
12400 #ifdef TARGET_NR_setgid32
12401     case TARGET_NR_setgid32:
12402         return get_errno(sys_setgid(arg1));
12403 #endif
12404 #ifdef TARGET_NR_setfsuid32
12405     case TARGET_NR_setfsuid32:
12406         return get_errno(setfsuid(arg1));
12407 #endif
12408 #ifdef TARGET_NR_setfsgid32
12409     case TARGET_NR_setfsgid32:
12410         return get_errno(setfsgid(arg1));
12411 #endif
12412 #ifdef TARGET_NR_mincore
12413     case TARGET_NR_mincore:
12414         {
12415             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12416             if (!a) {
12417                 return -TARGET_ENOMEM;
12418             }
12419             p = lock_user_string(arg3);
12420             if (!p) {
12421                 ret = -TARGET_EFAULT;
12422             } else {
12423                 ret = get_errno(mincore(a, arg2, p));
12424                 unlock_user(p, arg3, ret);
12425             }
12426             unlock_user(a, arg1, 0);
12427         }
12428         return ret;
12429 #endif
12430 #ifdef TARGET_NR_arm_fadvise64_64
12431     case TARGET_NR_arm_fadvise64_64:
12432         /* arm_fadvise64_64 looks like fadvise64_64 but
12433          * with different argument order: fd, advice, offset, len
12434          * rather than the usual fd, offset, len, advice.
12435          * Note that offset and len are both 64-bit so appear as
12436          * pairs of 32-bit registers.
12437          */
12438         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12439                             target_offset64(arg5, arg6), arg2);
12440         return -host_to_target_errno(ret);
12441 #endif
12442 
12443 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12444 
12445 #ifdef TARGET_NR_fadvise64_64
12446     case TARGET_NR_fadvise64_64:
12447 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12448         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12449         ret = arg2;
12450         arg2 = arg3;
12451         arg3 = arg4;
12452         arg4 = arg5;
12453         arg5 = arg6;
12454         arg6 = ret;
12455 #else
12456         /* 6 args: fd, offset (high, low), len (high, low), advice */
12457         if (regpairs_aligned(cpu_env, num)) {
12458             /* offset is in (3,4), len in (5,6) and advice in 7 */
12459             arg2 = arg3;
12460             arg3 = arg4;
12461             arg4 = arg5;
12462             arg5 = arg6;
12463             arg6 = arg7;
12464         }
12465 #endif
12466         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12467                             target_offset64(arg4, arg5), arg6);
12468         return -host_to_target_errno(ret);
12469 #endif
12470 
12471 #ifdef TARGET_NR_fadvise64
12472     case TARGET_NR_fadvise64:
12473         /* 5 args: fd, offset (high, low), len, advice */
12474         if (regpairs_aligned(cpu_env, num)) {
12475             /* offset is in (3,4), len in 5 and advice in 6 */
12476             arg2 = arg3;
12477             arg3 = arg4;
12478             arg4 = arg5;
12479             arg5 = arg6;
12480         }
12481         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12482         return -host_to_target_errno(ret);
12483 #endif
12484 
12485 #else /* not a 32-bit ABI */
12486 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12487 #ifdef TARGET_NR_fadvise64_64
12488     case TARGET_NR_fadvise64_64:
12489 #endif
12490 #ifdef TARGET_NR_fadvise64
12491     case TARGET_NR_fadvise64:
12492 #endif
12493 #ifdef TARGET_S390X
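              /*
               * 64-bit s390 guests define POSIX_FADV_DONTNEED/NOREUSE as 6/7;
               * remap them to the host constants and map the unused 4/5 to
               * invalid values.
               */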
12494         switch (arg4) {
12495         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12496         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12497         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12498         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12499         default: break;
12500         }
12501 #endif
12502         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12503 #endif
12504 #endif /* end of 64-bit ABI fadvise handling */
12505 
12506 #ifdef TARGET_NR_madvise
12507     case TARGET_NR_madvise:
12508         return target_madvise(arg1, arg2, arg3);
12509 #endif
12510 #ifdef TARGET_NR_fcntl64
12511     case TARGET_NR_fcntl64:
12512     {
12513         int cmd;
12514         struct flock fl;
12515         from_flock64_fn *copyfrom = copy_from_user_flock64;
12516         to_flock64_fn *copyto = copy_to_user_flock64;
12517 
12518 #ifdef TARGET_ARM
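              /*
               * Old-ABI ARM lays out struct flock64 differently, so use the
               * OABI conversion helpers instead.
               */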
12519         if (!cpu_env->eabi) {
12520             copyfrom = copy_from_user_oabi_flock64;
12521             copyto = copy_to_user_oabi_flock64;
12522         }
12523 #endif
12524 
12525         cmd = target_to_host_fcntl_cmd(arg2);
12526         if (cmd == -TARGET_EINVAL) {
12527             return cmd;
12528         }
12529 
12530         switch (arg2) {
12531         case TARGET_F_GETLK64:
12532             ret = copyfrom(&fl, arg3);
12533             if (ret) {
12534                 break;
12535             }
12536             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12537             if (ret == 0) {
12538                 ret = copyto(arg3, &fl);
12539             }
12540             break;
12541 
12542         case TARGET_F_SETLK64:
12543         case TARGET_F_SETLKW64:
12544             ret = copyfrom(&fl, arg3);
12545             if (ret) {
12546                 break;
12547             }
12548             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12549             break;
12550         default:
12551             ret = do_fcntl(arg1, arg2, arg3);
12552             break;
12553         }
12554         return ret;
12555     }
12556 #endif
12557 #ifdef TARGET_NR_cacheflush
12558     case TARGET_NR_cacheflush:
12559         /* self-modifying code is handled automatically, so nothing needed */
12560         return 0;
12561 #endif
12562 #ifdef TARGET_NR_getpagesize
12563     case TARGET_NR_getpagesize:
12564         return TARGET_PAGE_SIZE;
12565 #endif
12566     case TARGET_NR_gettid:
12567         return get_errno(sys_gettid());
12568 #ifdef TARGET_NR_readahead
12569     case TARGET_NR_readahead:
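              /*
               * The 64-bit offset arrives as a 32-bit register pair, which
               * some ABIs align to an even register, shifting the arguments.
               */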
12570 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12571         if (regpairs_aligned(cpu_env, num)) {
12572             arg2 = arg3;
12573             arg3 = arg4;
12574             arg4 = arg5;
12575         }
12576         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12577 #else
12578         ret = get_errno(readahead(arg1, arg2, arg3));
12579 #endif
12580         return ret;
12581 #endif
12582 #ifdef CONFIG_ATTR
12583 #ifdef TARGET_NR_setxattr
12584     case TARGET_NR_listxattr:
12585     case TARGET_NR_llistxattr:
12586     {
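              /*
               * A NULL guest buffer is passed through as NULL so the usual
               * "query the required size" behaviour still works.
               */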
12587         void *b = 0;
12588         if (arg2) {
12589             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12590             if (!b) {
12591                 return -TARGET_EFAULT;
12592             }
12593         }
12594         p = lock_user_string(arg1);
12595         if (p) {
12596             if (num == TARGET_NR_listxattr) {
12597                 ret = get_errno(listxattr(p, b, arg3));
12598             } else {
12599                 ret = get_errno(llistxattr(p, b, arg3));
12600             }
12601         } else {
12602             ret = -TARGET_EFAULT;
12603         }
12604         unlock_user(p, arg1, 0);
12605         unlock_user(b, arg2, arg3);
12606         return ret;
12607     }
12608     case TARGET_NR_flistxattr:
12609     {
12610         void *b = 0;
12611         if (arg2) {
12612             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12613             if (!b) {
12614                 return -TARGET_EFAULT;
12615             }
12616         }
12617         ret = get_errno(flistxattr(arg1, b, arg3));
12618         unlock_user(b, arg2, arg3);
12619         return ret;
12620     }
12621     case TARGET_NR_setxattr:
12622     case TARGET_NR_lsetxattr:
12623         {
12624             void *n, *v = 0;
12625             if (arg3) {
12626                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12627                 if (!v) {
12628                     return -TARGET_EFAULT;
12629                 }
12630             }
12631             p = lock_user_string(arg1);
12632             n = lock_user_string(arg2);
12633             if (p && n) {
12634                 if (num == TARGET_NR_setxattr) {
12635                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12636                 } else {
12637                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12638                 }
12639             } else {
12640                 ret = -TARGET_EFAULT;
12641             }
12642             unlock_user(p, arg1, 0);
12643             unlock_user(n, arg2, 0);
12644             unlock_user(v, arg3, 0);
12645         }
12646         return ret;
12647     case TARGET_NR_fsetxattr:
12648         {
12649             void *n, *v = 0;
12650             if (arg3) {
12651                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12652                 if (!v) {
12653                     return -TARGET_EFAULT;
12654                 }
12655             }
12656             n = lock_user_string(arg2);
12657             if (n) {
12658                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12659             } else {
12660                 ret = -TARGET_EFAULT;
12661             }
12662             unlock_user(n, arg2, 0);
12663             unlock_user(v, arg3, 0);
12664         }
12665         return ret;
12666     case TARGET_NR_getxattr:
12667     case TARGET_NR_lgetxattr:
12668         {
12669             void *n, *v = 0;
12670             if (arg3) {
12671                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12672                 if (!v) {
12673                     return -TARGET_EFAULT;
12674                 }
12675             }
12676             p = lock_user_string(arg1);
12677             n = lock_user_string(arg2);
12678             if (p && n) {
12679                 if (num == TARGET_NR_getxattr) {
12680                     ret = get_errno(getxattr(p, n, v, arg4));
12681                 } else {
12682                     ret = get_errno(lgetxattr(p, n, v, arg4));
12683                 }
12684             } else {
12685                 ret = -TARGET_EFAULT;
12686             }
12687             unlock_user(p, arg1, 0);
12688             unlock_user(n, arg2, 0);
12689             unlock_user(v, arg3, arg4);
12690         }
12691         return ret;
12692     case TARGET_NR_fgetxattr:
12693         {
12694             void *n, *v = 0;
12695             if (arg3) {
12696                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12697                 if (!v) {
12698                     return -TARGET_EFAULT;
12699                 }
12700             }
12701             n = lock_user_string(arg2);
12702             if (n) {
12703                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12704             } else {
12705                 ret = -TARGET_EFAULT;
12706             }
12707             unlock_user(n, arg2, 0);
12708             unlock_user(v, arg3, arg4);
12709         }
12710         return ret;
12711     case TARGET_NR_removexattr:
12712     case TARGET_NR_lremovexattr:
12713         {
12714             void *n;
12715             p = lock_user_string(arg1);
12716             n = lock_user_string(arg2);
12717             if (p && n) {
12718                 if (num == TARGET_NR_removexattr) {
12719                     ret = get_errno(removexattr(p, n));
12720                 } else {
12721                     ret = get_errno(lremovexattr(p, n));
12722                 }
12723             } else {
12724                 ret = -TARGET_EFAULT;
12725             }
12726             unlock_user(p, arg1, 0);
12727             unlock_user(n, arg2, 0);
12728         }
12729         return ret;
12730     case TARGET_NR_fremovexattr:
12731         {
12732             void *n;
12733             n = lock_user_string(arg2);
12734             if (n) {
12735                 ret = get_errno(fremovexattr(arg1, n));
12736             } else {
12737                 ret = -TARGET_EFAULT;
12738             }
12739             unlock_user(n, arg2, 0);
12740         }
12741         return ret;
12742 #endif
12743 #endif /* CONFIG_ATTR */
12744 #ifdef TARGET_NR_set_thread_area
12745     case TARGET_NR_set_thread_area:
12746 #if defined(TARGET_MIPS)
12747       cpu_env->active_tc.CP0_UserLocal = arg1;
12748       return 0;
12749 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12750       return do_set_thread_area(cpu_env, arg1);
12751 #elif defined(TARGET_M68K)
12752       {
12753           TaskState *ts = get_task_state(cpu);
12754           ts->tp_value = arg1;
12755           return 0;
12756       }
12757 #else
12758       return -TARGET_ENOSYS;
12759 #endif
12760 #endif
12761 #ifdef TARGET_NR_get_thread_area
12762     case TARGET_NR_get_thread_area:
12763 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12764         return do_get_thread_area(cpu_env, arg1);
12765 #elif defined(TARGET_M68K)
12766         {
12767             TaskState *ts = get_task_state(cpu);
12768             return ts->tp_value;
12769         }
12770 #else
12771         return -TARGET_ENOSYS;
12772 #endif
12773 #endif
12774 #ifdef TARGET_NR_getdomainname
12775     case TARGET_NR_getdomainname:
12776         return -TARGET_ENOSYS;
12777 #endif
12778 
12779 #ifdef TARGET_NR_clock_settime
12780     case TARGET_NR_clock_settime:
12781     {
12782         struct timespec ts;
12783 
12784         ret = target_to_host_timespec(&ts, arg2);
12785         if (!is_error(ret)) {
12786             ret = get_errno(clock_settime(arg1, &ts));
12787         }
12788         return ret;
12789     }
12790 #endif
12791 #ifdef TARGET_NR_clock_settime64
12792     case TARGET_NR_clock_settime64:
12793     {
12794         struct timespec ts;
12795 
12796         ret = target_to_host_timespec64(&ts, arg2);
12797         if (!is_error(ret)) {
12798             ret = get_errno(clock_settime(arg1, &ts));
12799         }
12800         return ret;
12801     }
12802 #endif
12803 #ifdef TARGET_NR_clock_gettime
12804     case TARGET_NR_clock_gettime:
12805     {
12806         struct timespec ts;
12807         ret = get_errno(clock_gettime(arg1, &ts));
12808         if (!is_error(ret)) {
12809             ret = host_to_target_timespec(arg2, &ts);
12810         }
12811         return ret;
12812     }
12813 #endif
12814 #ifdef TARGET_NR_clock_gettime64
12815     case TARGET_NR_clock_gettime64:
12816     {
12817         struct timespec ts;
12818         ret = get_errno(clock_gettime(arg1, &ts));
12819         if (!is_error(ret)) {
12820             ret = host_to_target_timespec64(arg2, &ts);
12821         }
12822         return ret;
12823     }
12824 #endif
12825 #ifdef TARGET_NR_clock_getres
12826     case TARGET_NR_clock_getres:
12827     {
12828         struct timespec ts;
12829         ret = get_errno(clock_getres(arg1, &ts));
12830         if (!is_error(ret)) {
12831             ret = host_to_target_timespec(arg2, &ts);
12832         }
12833         return ret;
12834     }
12835 #endif
12836 #ifdef TARGET_NR_clock_getres_time64
12837     case TARGET_NR_clock_getres_time64:
12838     {
12839         struct timespec ts;
12840         ret = get_errno(clock_getres(arg1, &ts));
12841         if (!is_error(ret)) {
12842             ret = host_to_target_timespec64(arg2, &ts);
12843         }
12844         return ret;
12845     }
12846 #endif
12847 #ifdef TARGET_NR_clock_nanosleep
12848     case TARGET_NR_clock_nanosleep:
12849     {
12850         struct timespec ts;
12851         if (target_to_host_timespec(&ts, arg3)) {
12852             return -TARGET_EFAULT;
12853         }
12854         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12855                                              &ts, arg4 ? &ts : NULL));
12856         /*
12857          * If the call is interrupted by a signal handler, it fails with
12858          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12859          * the remaining unslept time is written back to arg4.
12860          */
12861         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12862             host_to_target_timespec(arg4, &ts)) {
12863               return -TARGET_EFAULT;
12864         }
12865 
12866         return ret;
12867     }
12868 #endif
12869 #ifdef TARGET_NR_clock_nanosleep_time64
12870     case TARGET_NR_clock_nanosleep_time64:
12871     {
12872         struct timespec ts;
12873 
12874         if (target_to_host_timespec64(&ts, arg3)) {
12875             return -TARGET_EFAULT;
12876         }
12877 
12878         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12879                                              &ts, arg4 ? &ts : NULL));
12880 
12881         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12882             host_to_target_timespec64(arg4, &ts)) {
12883             return -TARGET_EFAULT;
12884         }
12885         return ret;
12886     }
12887 #endif
12888 
12889 #if defined(TARGET_NR_set_tid_address)
12890     case TARGET_NR_set_tid_address:
12891     {
12892         TaskState *ts = get_task_state(cpu);
12893         ts->child_tidptr = arg1;
12894         /* Do not call the host set_tid_address(); just return the thread id. */
12895         return get_errno(sys_gettid());
12896     }
12897 #endif
12898 
12899     case TARGET_NR_tkill:
12900         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12901 
12902     case TARGET_NR_tgkill:
12903         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12904                          target_to_host_signal(arg3)));
12905 
12906 #ifdef TARGET_NR_set_robust_list
12907     case TARGET_NR_set_robust_list:
12908     case TARGET_NR_get_robust_list:
12909         /* The ABI for supporting robust futexes has userspace pass
12910          * the kernel a pointer to a linked list which is updated by
12911          * userspace after the syscall; the list is walked by the kernel
12912          * when the thread exits. Since the linked list in QEMU guest
12913          * memory isn't a valid linked list for the host and we have
12914          * no way to reliably intercept the thread-death event, we can't
12915          * support these. Silently return ENOSYS so that guest userspace
12916          * falls back to a non-robust futex implementation (which should
12917          * be OK except in the corner case of the guest crashing while
12918          * holding a mutex that is shared with another process via
12919          * shared memory).
12920          */
12921         return -TARGET_ENOSYS;
12922 #endif
12923 
12924 #if defined(TARGET_NR_utimensat)
12925     case TARGET_NR_utimensat:
12926         {
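                  /*
                   * arg3 points to two target timespecs (atime, mtime);
                   * a NULL arg3 means "set both to the current time".
                   */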
12927             struct timespec *tsp, ts[2];
12928             if (!arg3) {
12929                 tsp = NULL;
12930             } else {
12931                 if (target_to_host_timespec(ts, arg3)) {
12932                     return -TARGET_EFAULT;
12933                 }
12934                 if (target_to_host_timespec(ts + 1, arg3 +
12935                                             sizeof(struct target_timespec))) {
12936                     return -TARGET_EFAULT;
12937                 }
12938                 tsp = ts;
12939             }
12940             if (!arg2) {
12941                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12942             } else {
12943                 if (!(p = lock_user_string(arg2))) {
12944                     return -TARGET_EFAULT;
12945                 }
12946                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12947                 unlock_user(p, arg2, 0);
12948             }
12949         }
12950         return ret;
12951 #endif
12952 #ifdef TARGET_NR_utimensat_time64
12953     case TARGET_NR_utimensat_time64:
12954         {
12955             struct timespec *tsp, ts[2];
12956             if (!arg3) {
12957                 tsp = NULL;
12958             } else {
12959                 if (target_to_host_timespec64(ts, arg3)) {
12960                     return -TARGET_EFAULT;
12961                 }
12962                 if (target_to_host_timespec64(ts + 1, arg3 +
12963                                      sizeof(struct target__kernel_timespec))) {
12964                     return -TARGET_EFAULT;
12965                 }
12966                 tsp = ts;
12967             }
12968             if (!arg2) {
12969                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12970             } else {
12971                 p = lock_user_string(arg2);
12972                 if (!p) {
12973                     return -TARGET_EFAULT;
12974                 }
12975                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12976                 unlock_user(p, arg2, 0);
12977             }
12978         }
12979         return ret;
12980 #endif
12981 #ifdef TARGET_NR_futex
12982     case TARGET_NR_futex:
12983         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12984 #endif
12985 #ifdef TARGET_NR_futex_time64
12986     case TARGET_NR_futex_time64:
12987         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12988 #endif
12989 #ifdef CONFIG_INOTIFY
12990 #if defined(TARGET_NR_inotify_init)
12991     case TARGET_NR_inotify_init:
12992         ret = get_errno(inotify_init());
12993         if (ret >= 0) {
12994             fd_trans_register(ret, &target_inotify_trans);
12995         }
12996         return ret;
12997 #endif
12998 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12999     case TARGET_NR_inotify_init1:
13000         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13001                                           fcntl_flags_tbl)));
13002         if (ret >= 0) {
13003             fd_trans_register(ret, &target_inotify_trans);
13004         }
13005         return ret;
13006 #endif
13007 #if defined(TARGET_NR_inotify_add_watch)
13008     case TARGET_NR_inotify_add_watch:
13009         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
13010         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13011         unlock_user(p, arg2, 0);
13012         return ret;
13013 #endif
13014 #if defined(TARGET_NR_inotify_rm_watch)
13015     case TARGET_NR_inotify_rm_watch:
13016         return get_errno(inotify_rm_watch(arg1, arg2));
13017 #endif
13018 #endif
13019 
13020 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13021     case TARGET_NR_mq_open:
13022         {
13023             struct mq_attr posix_mq_attr;
13024             struct mq_attr *pposix_mq_attr;
13025             int host_flags;
13026 
13027             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13028             pposix_mq_attr = NULL;
13029             if (arg4) {
13030                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13031                     return -TARGET_EFAULT;
13032                 }
13033                 pposix_mq_attr = &posix_mq_attr;
13034             }
13035             p = lock_user_string(arg1 - 1);
13036             if (!p) {
13037                 return -TARGET_EFAULT;
13038             }
13039             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13040             unlock_user(p, arg1, 0);
13041         }
13042         return ret;
13043 
13044     case TARGET_NR_mq_unlink:
13045         p = lock_user_string(arg1 - 1);
13046         if (!p) {
13047             return -TARGET_EFAULT;
13048         }
13049         ret = get_errno(mq_unlink(p));
13050         unlock_user(p, arg1, 0);
13051         return ret;
13052 
13053 #ifdef TARGET_NR_mq_timedsend
13054     case TARGET_NR_mq_timedsend:
13055         {
13056             struct timespec ts;
13057 
13058             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13059             if (arg5 != 0) {
13060                 if (target_to_host_timespec(&ts, arg5)) {
13061                     return -TARGET_EFAULT;
13062                 }
13063                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13064                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13065                     return -TARGET_EFAULT;
13066                 }
13067             } else {
13068                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13069             }
13070             unlock_user(p, arg2, arg3);
13071         }
13072         return ret;
13073 #endif
13074 #ifdef TARGET_NR_mq_timedsend_time64
13075     case TARGET_NR_mq_timedsend_time64:
13076         {
13077             struct timespec ts;
13078 
13079             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13080             if (arg5 != 0) {
13081                 if (target_to_host_timespec64(&ts, arg5)) {
13082                     return -TARGET_EFAULT;
13083                 }
13084                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13085                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13086                     return -TARGET_EFAULT;
13087                 }
13088             } else {
13089                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13090             }
13091             unlock_user(p, arg2, arg3);
13092         }
13093         return ret;
13094 #endif
13095 
13096 #ifdef TARGET_NR_mq_timedreceive
13097     case TARGET_NR_mq_timedreceive:
13098         {
13099             struct timespec ts;
13100             unsigned int prio;
13101 
13102             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13103             if (arg5 != 0) {
13104                 if (target_to_host_timespec(&ts, arg5)) {
13105                     return -TARGET_EFAULT;
13106                 }
13107                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13108                                                      &prio, &ts));
13109                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13110                     return -TARGET_EFAULT;
13111                 }
13112             } else {
13113                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13114                                                      &prio, NULL));
13115             }
13116             unlock_user(p, arg2, arg3);
13117             if (arg4 != 0)
13118                 put_user_u32(prio, arg4);
13119         }
13120         return ret;
13121 #endif
13122 #ifdef TARGET_NR_mq_timedreceive_time64
13123     case TARGET_NR_mq_timedreceive_time64:
13124         {
13125             struct timespec ts;
13126             unsigned int prio;
13127 
13128             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13129             if (arg5 != 0) {
13130                 if (target_to_host_timespec64(&ts, arg5)) {
13131                     return -TARGET_EFAULT;
13132                 }
13133                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13134                                                      &prio, &ts));
13135                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13136                     return -TARGET_EFAULT;
13137                 }
13138             } else {
13139                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13140                                                      &prio, NULL));
13141             }
13142             unlock_user(p, arg2, arg3);
13143             if (arg4 != 0) {
13144                 put_user_u32(prio, arg4);
13145             }
13146         }
13147         return ret;
13148 #endif
13149 
13150     /* Not implemented for now... */
13151 /*     case TARGET_NR_mq_notify: */
13152 /*         break; */
13153 
13154     case TARGET_NR_mq_getsetattr:
13155         {
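                  /*
                   * A non-zero arg2 selects mq_setattr (the old attributes
                   * come back as a side effect); otherwise a non-zero arg3
                   * alone requests mq_getattr.
                   */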
13156             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13157             ret = 0;
13158             if (arg2 != 0) {
13159                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
13160                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13161                                            &posix_mq_attr_out));
13162             } else if (arg3 != 0) {
13163                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13164             }
13165             if (ret == 0 && arg3 != 0) {
13166                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13167             }
13168         }
13169         return ret;
13170 #endif
13171 
13172 #ifdef CONFIG_SPLICE
13173 #ifdef TARGET_NR_tee
13174     case TARGET_NR_tee:
13175         {
13176             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13177         }
13178         return ret;
13179 #endif
13180 #ifdef TARGET_NR_splice
13181     case TARGET_NR_splice:
13182         {
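                  /*
                   * Copy the optional 64-bit offsets in, do the splice, then
                   * write the (possibly advanced) offsets back to the guest.
                   */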
13183             loff_t loff_in, loff_out;
13184             loff_t *ploff_in = NULL, *ploff_out = NULL;
13185             if (arg2) {
13186                 if (get_user_u64(loff_in, arg2)) {
13187                     return -TARGET_EFAULT;
13188                 }
13189                 ploff_in = &loff_in;
13190             }
13191             if (arg4) {
13192                 if (get_user_u64(loff_out, arg4)) {
13193                     return -TARGET_EFAULT;
13194                 }
13195                 ploff_out = &loff_out;
13196             }
13197             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13198             if (arg2) {
13199                 if (put_user_u64(loff_in, arg2)) {
13200                     return -TARGET_EFAULT;
13201                 }
13202             }
13203             if (arg4) {
13204                 if (put_user_u64(loff_out, arg4)) {
13205                     return -TARGET_EFAULT;
13206                 }
13207             }
13208         }
13209         return ret;
13210 #endif
13211 #ifdef TARGET_NR_vmsplice
13212     case TARGET_NR_vmsplice:
13213         {
13214             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13215             if (vec != NULL) {
13216                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13217                 unlock_iovec(vec, arg2, arg3, 0);
13218             } else {
13219                 ret = -host_to_target_errno(errno);
13220             }
13221         }
13222         return ret;
13223 #endif
13224 #endif /* CONFIG_SPLICE */
13225 #ifdef CONFIG_EVENTFD
13226 #if defined(TARGET_NR_eventfd)
13227     case TARGET_NR_eventfd:
13228         ret = get_errno(eventfd(arg1, 0));
13229         if (ret >= 0) {
13230             fd_trans_register(ret, &target_eventfd_trans);
13231         }
13232         return ret;
13233 #endif
13234 #if defined(TARGET_NR_eventfd2)
13235     case TARGET_NR_eventfd2:
13236     {
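              /*
               * Strip the target O_NONBLOCK/O_CLOEXEC bits and substitute
               * the host values; other bits are passed through unchanged.
               */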
13237         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13238         if (arg2 & TARGET_O_NONBLOCK) {
13239             host_flags |= O_NONBLOCK;
13240         }
13241         if (arg2 & TARGET_O_CLOEXEC) {
13242             host_flags |= O_CLOEXEC;
13243         }
13244         ret = get_errno(eventfd(arg1, host_flags));
13245         if (ret >= 0) {
13246             fd_trans_register(ret, &target_eventfd_trans);
13247         }
13248         return ret;
13249     }
13250 #endif
13251 #endif /* CONFIG_EVENTFD  */
13252 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13253     case TARGET_NR_fallocate:
13254 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13255         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13256                                   target_offset64(arg5, arg6)));
13257 #else
13258         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13259 #endif
13260         return ret;
13261 #endif
13262 #if defined(CONFIG_SYNC_FILE_RANGE)
13263 #if defined(TARGET_NR_sync_file_range)
13264     case TARGET_NR_sync_file_range:
13265 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13266 #if defined(TARGET_MIPS)
13267         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13268                                         target_offset64(arg5, arg6), arg7));
13269 #else
13270         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13271                                         target_offset64(arg4, arg5), arg6));
13272 #endif /* !TARGET_MIPS */
13273 #else
13274         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13275 #endif
13276         return ret;
13277 #endif
13278 #if defined(TARGET_NR_sync_file_range2) || \
13279     defined(TARGET_NR_arm_sync_file_range)
13280 #if defined(TARGET_NR_sync_file_range2)
13281     case TARGET_NR_sync_file_range2:
13282 #endif
13283 #if defined(TARGET_NR_arm_sync_file_range)
13284     case TARGET_NR_arm_sync_file_range:
13285 #endif
13286         /* This is like sync_file_range but the arguments are reordered */
13287 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13288         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13289                                         target_offset64(arg5, arg6), arg2));
13290 #else
13291         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13292 #endif
13293         return ret;
13294 #endif
13295 #endif
13296 #if defined(TARGET_NR_signalfd4)
13297     case TARGET_NR_signalfd4:
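        /*
         * arg3 is the sigsetsize; do_signalfd4() works with the target
         * sigset size directly, so it is not passed through.
         */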
13298         return do_signalfd4(arg1, arg2, arg4);
13299 #endif
13300 #if defined(TARGET_NR_signalfd)
13301     case TARGET_NR_signalfd:
13302         return do_signalfd4(arg1, arg2, 0);
13303 #endif
13304 #if defined(CONFIG_EPOLL)
13305 #if defined(TARGET_NR_epoll_create)
13306     case TARGET_NR_epoll_create:
13307         return get_errno(epoll_create(arg1));
13308 #endif
13309 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13310     case TARGET_NR_epoll_create1:
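        /*
         * EPOLL_CLOEXEC shares its value with O_CLOEXEC, so the generic
         * fcntl flag table covers the only defined flag.
         */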
13311         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13312 #endif
13313 #if defined(TARGET_NR_epoll_ctl)
13314     case TARGET_NR_epoll_ctl:
13315     {
13316         struct epoll_event ep;
13317         struct epoll_event *epp = NULL;
13318         if (arg4) {
13319             if (arg2 != EPOLL_CTL_DEL) {
13320                 struct target_epoll_event *target_ep;
13321                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13322                     return -TARGET_EFAULT;
13323                 }
13324                 ep.events = tswap32(target_ep->events);
13325                 /*
13326                  * The epoll_data_t union is just opaque data to the kernel,
13327                  * so we transfer all 64 bits across and need not worry what
13328                  * actual data type it is.
13329                  */
13330                 ep.data.u64 = tswap64(target_ep->data.u64);
13331                 unlock_user_struct(target_ep, arg4, 0);
13332             }
13333             /*
13334              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
13335              * a non-null event pointer even though it is ignored, so
13336              * always pass one when arg4 is set.
13337              */
13338             epp = &ep;
13339         }
13340         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13341     }
13342 #endif
13343 
13344 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13345 #if defined(TARGET_NR_epoll_wait)
13346     case TARGET_NR_epoll_wait:
13347 #endif
13348 #if defined(TARGET_NR_epoll_pwait)
13349     case TARGET_NR_epoll_pwait:
13350 #endif
13351     {
13352         struct target_epoll_event *target_ep;
13353         struct epoll_event *ep;
13354         int epfd = arg1;
13355         int maxevents = arg3;
13356         int timeout = arg4;
13357 
13358         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13359             return -TARGET_EINVAL;
13360         }
13361 
13362         target_ep = lock_user(VERIFY_WRITE, arg2,
13363                               maxevents * sizeof(struct target_epoll_event), 1);
13364         if (!target_ep) {
13365             return -TARGET_EFAULT;
13366         }
13367 
13368         ep = g_try_new(struct epoll_event, maxevents);
13369         if (!ep) {
13370             unlock_user(target_ep, arg2, 0);
13371             return -TARGET_ENOMEM;
13372         }
13373 
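        /*
         * Both variants are funnelled through safe_epoll_pwait(); plain
         * epoll_wait simply passes a NULL signal mask.
         */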
13374         switch (num) {
13375 #if defined(TARGET_NR_epoll_pwait)
13376         case TARGET_NR_epoll_pwait:
13377         {
13378             sigset_t *set = NULL;
13379 
13380             if (arg5) {
13381                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13382                 if (ret != 0) {
13383                     break;
13384                 }
13385             }
13386 
13387             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13388                                              set, SIGSET_T_SIZE));
13389 
13390             if (set) {
13391                 finish_sigsuspend_mask(ret);
13392             }
13393             break;
13394         }
13395 #endif
13396 #if defined(TARGET_NR_epoll_wait)
13397         case TARGET_NR_epoll_wait:
13398             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13399                                              NULL, 0));
13400             break;
13401 #endif
13402         default:
13403             ret = -TARGET_ENOSYS;
13404         }
13405         if (!is_error(ret)) {
13406             int i;
13407             for (i = 0; i < ret; i++) {
13408                 target_ep[i].events = tswap32(ep[i].events);
13409                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13410             }
13411             unlock_user(target_ep, arg2,
13412                         ret * sizeof(struct target_epoll_event));
13413         } else {
13414             unlock_user(target_ep, arg2, 0);
13415         }
13416         g_free(ep);
13417         return ret;
13418     }
13419 #endif
13420 #endif
13421 #ifdef TARGET_NR_prlimit64
13422     case TARGET_NR_prlimit64:
13423     {
13424         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13425         struct target_rlimit64 *target_rnew, *target_rold;
13426         struct host_rlimit64 rnew, rold, *rnewp = NULL;
13427         int resource = target_to_host_resource(arg2);
13428 
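        /*
         * Requests to change RLIMIT_AS, RLIMIT_DATA or RLIMIT_STACK are
         * silently ignored (rnewp stays NULL), as applying them on the
         * host could interfere with QEMU's own memory usage.
         */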
13429         if (arg3 && (resource != RLIMIT_AS &&
13430                      resource != RLIMIT_DATA &&
13431                      resource != RLIMIT_STACK)) {
13432             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13433                 return -TARGET_EFAULT;
13434             }
13435             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13436             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13437             unlock_user_struct(target_rnew, arg3, 0);
13438             rnewp = &rnew;
13439         }
13440 
13441         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13442         if (!is_error(ret) && arg4) {
13443             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13444                 return -TARGET_EFAULT;
13445             }
13446             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13447             __put_user(rold.rlim_max, &target_rold->rlim_max);
13448             unlock_user_struct(target_rold, arg4, 1);
13449         }
13450         return ret;
13451     }
13452 #endif
13453 #ifdef TARGET_NR_gethostname
13454     case TARGET_NR_gethostname:
13455     {
13456         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13457         if (name) {
13458             ret = get_errno(gethostname(name, arg2));
13459             unlock_user(name, arg1, arg2);
13460         } else {
13461             ret = -TARGET_EFAULT;
13462         }
13463         return ret;
13464     }
13465 #endif
13466 #ifdef TARGET_NR_atomic_cmpxchg_32
13467     case TARGET_NR_atomic_cmpxchg_32:
13468     {
13469         /* Not atomic: should use start_exclusive() from main.c. */
13470         abi_ulong mem_value;
13471         if (get_user_u32(mem_value, arg6)) {
13472             target_siginfo_t info;
13473             info.si_signo = SIGSEGV;
13474             info.si_errno = 0;
13475             info.si_code = TARGET_SEGV_MAPERR;
13476             info._sifields._sigfault._addr = arg6;
13477             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13478             ret = 0xdeadbeef;
13479 
13480         }
13481         if (mem_value == arg2)
13482             put_user_u32(arg1, arg6);
13483         return mem_value;
13484     }
13485 #endif
13486 #ifdef TARGET_NR_atomic_barrier
13487     case TARGET_NR_atomic_barrier:
13488         /* Like the kernel implementation and the QEMU Arm barrier,
13489          * treat this as a no-op. */
13490         return 0;
13491 #endif
13492 
13493 #ifdef TARGET_NR_timer_create
13494     case TARGET_NR_timer_create:
13495     {
13496         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13497 
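        /*
         * The timer id handed back to the guest is TIMER_MAGIC | index
         * into the g_posix_timers slot table; get_timer_id() decodes it
         * in the other timer_* syscalls.
         */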
13498         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13499 
13500         int clkid = arg1;
13501         int timer_index = next_free_host_timer();
13502 
13503         if (timer_index < 0) {
13504             ret = -TARGET_EAGAIN;
13505         } else {
13506             timer_t *phtimer = g_posix_timers + timer_index;
13507 
13508             if (arg2) {
13509                 phost_sevp = &host_sevp;
13510                 ret = target_to_host_sigevent(phost_sevp, arg2);
13511                 if (ret != 0) {
13512                     free_host_timer_slot(timer_index);
13513                     return ret;
13514                 }
13515             }
13516 
13517             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13518             if (ret) {
13519                 free_host_timer_slot(timer_index);
13520             } else {
13521                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13522                     timer_delete(*phtimer);
13523                     free_host_timer_slot(timer_index);
13524                     return -TARGET_EFAULT;
13525                 }
13526             }
13527         }
13528         return ret;
13529     }
13530 #endif
13531 
13532 #ifdef TARGET_NR_timer_settime
13533     case TARGET_NR_timer_settime:
13534     {
13535         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13536          * struct itimerspec *old_value */
13537         target_timer_t timerid = get_timer_id(arg1);
13538 
13539         if (timerid < 0) {
13540             ret = timerid;
13541         } else if (arg3 == 0) {
13542             ret = -TARGET_EINVAL;
13543         } else {
13544             timer_t htimer = g_posix_timers[timerid];
13545             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13546 
13547             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13548                 return -TARGET_EFAULT;
13549             }
13550             ret = get_errno(
13551                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13552             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13553                 return -TARGET_EFAULT;
13554             }
13555         }
13556         return ret;
13557     }
13558 #endif
13559 
13560 #ifdef TARGET_NR_timer_settime64
13561     case TARGET_NR_timer_settime64:
13562     {
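        /*
         * Same as timer_settime, but the itimerspec uses 64-bit time_t
         * fields and is converted with the *_itimerspec64() helpers.
         */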
13563         target_timer_t timerid = get_timer_id(arg1);
13564 
13565         if (timerid < 0) {
13566             ret = timerid;
13567         } else if (arg3 == 0) {
13568             ret = -TARGET_EINVAL;
13569         } else {
13570             timer_t htimer = g_posix_timers[timerid];
13571             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13572 
13573             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13574                 return -TARGET_EFAULT;
13575             }
13576             ret = get_errno(
13577                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13578             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13579                 return -TARGET_EFAULT;
13580             }
13581         }
13582         return ret;
13583     }
13584 #endif
13585 
13586 #ifdef TARGET_NR_timer_gettime
13587     case TARGET_NR_timer_gettime:
13588     {
13589         /* args: timer_t timerid, struct itimerspec *curr_value */
13590         target_timer_t timerid = get_timer_id(arg1);
13591 
13592         if (timerid < 0) {
13593             ret = timerid;
13594         } else if (!arg2) {
13595             ret = -TARGET_EFAULT;
13596         } else {
13597             timer_t htimer = g_posix_timers[timerid];
13598             struct itimerspec hspec;
13599             ret = get_errno(timer_gettime(htimer, &hspec));
13600 
13601             if (host_to_target_itimerspec(arg2, &hspec)) {
13602                 ret = -TARGET_EFAULT;
13603             }
13604         }
13605         return ret;
13606     }
13607 #endif
13608 
13609 #ifdef TARGET_NR_timer_gettime64
13610     case TARGET_NR_timer_gettime64:
13611     {
13612         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13613         target_timer_t timerid = get_timer_id(arg1);
13614 
13615         if (timerid < 0) {
13616             ret = timerid;
13617         } else if (!arg2) {
13618             ret = -TARGET_EFAULT;
13619         } else {
13620             timer_t htimer = g_posix_timers[timerid];
13621             struct itimerspec hspec;
13622             ret = get_errno(timer_gettime(htimer, &hspec));
13623 
13624             if (host_to_target_itimerspec64(arg2, &hspec)) {
13625                 ret = -TARGET_EFAULT;
13626             }
13627         }
13628         return ret;
13629     }
13630 #endif
13631 
13632 #ifdef TARGET_NR_timer_getoverrun
13633     case TARGET_NR_timer_getoverrun:
13634     {
13635         /* args: timer_t timerid */
13636         target_timer_t timerid = get_timer_id(arg1);
13637 
13638         if (timerid < 0) {
13639             ret = timerid;
13640         } else {
13641             timer_t htimer = g_posix_timers[timerid];
13642             ret = get_errno(timer_getoverrun(htimer));
13643         }
13644         return ret;
13645     }
13646 #endif
13647 
13648 #ifdef TARGET_NR_timer_delete
13649     case TARGET_NR_timer_delete:
13650     {
13651         /* args: timer_t timerid */
13652         target_timer_t timerid = get_timer_id(arg1);
13653 
13654         if (timerid < 0) {
13655             ret = timerid;
13656         } else {
13657             timer_t htimer = g_posix_timers[timerid];
13658             ret = get_errno(timer_delete(htimer));
13659             free_host_timer_slot(timerid);
13660         }
13661         return ret;
13662     }
13663 #endif
13664 
13665 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13666     case TARGET_NR_timerfd_create:
13667         ret = get_errno(timerfd_create(arg1,
13668                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13669         if (ret >= 0) {
13670             fd_trans_register(ret, &target_timerfd_trans);
13671         }
13672         return ret;
13673 #endif
13674 
13675 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13676     case TARGET_NR_timerfd_gettime:
13677         {
13678             struct itimerspec its_curr;
13679 
13680             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13681 
13682             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13683                 return -TARGET_EFAULT;
13684             }
13685         }
13686         return ret;
13687 #endif
13688 
13689 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13690     case TARGET_NR_timerfd_gettime64:
13691         {
13692             struct itimerspec its_curr;
13693 
13694             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13695 
13696             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13697                 return -TARGET_EFAULT;
13698             }
13699         }
13700         return ret;
13701 #endif
13702 
13703 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13704     case TARGET_NR_timerfd_settime:
13705         {
13706             struct itimerspec its_new, its_old, *p_new;
13707 
13708             if (arg3) {
13709                 if (target_to_host_itimerspec(&its_new, arg3)) {
13710                     return -TARGET_EFAULT;
13711                 }
13712                 p_new = &its_new;
13713             } else {
13714                 p_new = NULL;
13715             }
13716 
13717             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13718 
13719             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13720                 return -TARGET_EFAULT;
13721             }
13722         }
13723         return ret;
13724 #endif
13725 
13726 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13727     case TARGET_NR_timerfd_settime64:
13728         {
13729             struct itimerspec its_new, its_old, *p_new;
13730 
13731             if (arg3) {
13732                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13733                     return -TARGET_EFAULT;
13734                 }
13735                 p_new = &its_new;
13736             } else {
13737                 p_new = NULL;
13738             }
13739 
13740             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13741 
13742             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13743                 return -TARGET_EFAULT;
13744             }
13745         }
13746         return ret;
13747 #endif
13748 
13749 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13750     case TARGET_NR_ioprio_get:
13751         return get_errno(ioprio_get(arg1, arg2));
13752 #endif
13753 
13754 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13755     case TARGET_NR_ioprio_set:
13756         return get_errno(ioprio_set(arg1, arg2, arg3));
13757 #endif
13758 
13759 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13760     case TARGET_NR_setns:
13761         return get_errno(setns(arg1, arg2));
13762 #endif
13763 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13764     case TARGET_NR_unshare:
13765         return get_errno(unshare(arg1));
13766 #endif
13767 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13768     case TARGET_NR_kcmp:
13769         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13770 #endif
13771 #ifdef TARGET_NR_swapcontext
13772     case TARGET_NR_swapcontext:
13773         /* PowerPC specific.  */
13774         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13775 #endif
13776 #ifdef TARGET_NR_memfd_create
13777     case TARGET_NR_memfd_create:
13778         p = lock_user_string(arg1);
13779         if (!p) {
13780             return -TARGET_EFAULT;
13781         }
13782         ret = get_errno(memfd_create(p, arg2));
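        /* Clear any stale fd translator registered for this fd number. */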
13783         fd_trans_unregister(ret);
13784         unlock_user(p, arg1, 0);
13785         return ret;
13786 #endif
13787 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13788     case TARGET_NR_membarrier:
13789         return get_errno(membarrier(arg1, arg2));
13790 #endif
13791 
13792 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13793     case TARGET_NR_copy_file_range:
13794         {
13795             loff_t inoff, outoff;
13796             loff_t *pinoff = NULL, *poutoff = NULL;
13797 
13798             if (arg2) {
13799                 if (get_user_u64(inoff, arg2)) {
13800                     return -TARGET_EFAULT;
13801                 }
13802                 pinoff = &inoff;
13803             }
13804             if (arg4) {
13805                 if (get_user_u64(outoff, arg4)) {
13806                     return -TARGET_EFAULT;
13807                 }
13808                 poutoff = &outoff;
13809             }
13810             /* Do not sign-extend the count parameter. */
13811             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13812                                                  (abi_ulong)arg5, arg6));
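            /* Only write the updated offsets back if data was transferred. */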
13813             if (!is_error(ret) && ret > 0) {
13814                 if (arg2) {
13815                     if (put_user_u64(inoff, arg2)) {
13816                         return -TARGET_EFAULT;
13817                     }
13818                 }
13819                 if (arg4) {
13820                     if (put_user_u64(outoff, arg4)) {
13821                         return -TARGET_EFAULT;
13822                     }
13823                 }
13824             }
13825         }
13826         return ret;
13827 #endif
13828 
13829 #if defined(TARGET_NR_pivot_root)
13830     case TARGET_NR_pivot_root:
13831         {
13832             void *p2;
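            /* Lock both guest path strings; fail with EFAULT if either is
             * inaccessible. */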
13833             p = lock_user_string(arg1); /* new_root */
13834             p2 = lock_user_string(arg2); /* put_old */
13835             if (!p || !p2) {
13836                 ret = -TARGET_EFAULT;
13837             } else {
13838                 ret = get_errno(pivot_root(p, p2));
13839             }
13840             unlock_user(p2, arg2, 0);
13841             unlock_user(p, arg1, 0);
13842         }
13843         return ret;
13844 #endif
13845 
13846 #if defined(TARGET_NR_riscv_hwprobe)
13847     case TARGET_NR_riscv_hwprobe:
13848         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13849 #endif
13850 
13851     default:
13852         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13853         return -TARGET_ENOSYS;
13854     }
13855     return ret;
13856 }
13857 
13858 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13859                     abi_long arg2, abi_long arg3, abi_long arg4,
13860                     abi_long arg5, abi_long arg6, abi_long arg7,
13861                     abi_long arg8)
13862 {
13863     CPUState *cpu = env_cpu(cpu_env);
13864     abi_long ret;
13865 
13866 #ifdef DEBUG_ERESTARTSYS
13867     /* Debug-only code for exercising the syscall-restart code paths
13868      * in the per-architecture cpu main loops: restart every syscall
13869      * the guest makes once before letting it through.
13870      */
13871     {
13872         static bool flag;
13873         flag = !flag;
13874         if (flag) {
13875             return -QEMU_ERESTARTSYS;
13876         }
13877     }
13878 #endif
13879 
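    /* Record the syscall and its arguments for the plugin/trace hooks. */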
13880     record_syscall_start(cpu, num, arg1,
13881                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13882 
13883     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13884         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13885     }
13886 
13887     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13888                       arg5, arg6, arg7, arg8);
13889 
13890     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13891         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13892                           arg3, arg4, arg5, arg6);
13893     }
13894 
13895     record_syscall_return(cpu, num, ret);
13896     return ret;
13897 }
13898