xref: /openbmc/qemu/linux-user/syscall.c (revision 34febb2831ea51345fb853ee0f11bbbd5292dd6e)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/safe-syscall.h"
139 #include "qemu/guest-random.h"
140 #include "qemu/selfmap.h"
141 #include "user/syscall-trace.h"
142 #include "special-errno.h"
143 #include "qapi/error.h"
144 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc < 2.41 */
362 #ifndef SCHED_ATTR_SIZE_VER0
363 struct sched_attr {
364     uint32_t size;
365     uint32_t sched_policy;
366     uint64_t sched_flags;
367     int32_t sched_nice;
368     uint32_t sched_priority;
369     uint64_t sched_runtime;
370     uint64_t sched_deadline;
371     uint64_t sched_period;
372     uint32_t sched_util_min;
373     uint32_t sched_util_max;
374 };
375 #endif
376 #define __NR_sys_sched_getattr __NR_sched_getattr
377 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
378           unsigned int, size, unsigned int, flags);
379 #define __NR_sys_sched_setattr __NR_sched_setattr
380 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
381           unsigned int, flags);
382 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
383 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
384 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
385 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
386           const struct sched_param *, param);
387 #define __NR_sys_sched_getparam __NR_sched_getparam
388 _syscall2(int, sys_sched_getparam, pid_t, pid,
389           struct sched_param *, param);
390 #define __NR_sys_sched_setparam __NR_sched_setparam
391 _syscall2(int, sys_sched_setparam, pid_t, pid,
392           const struct sched_param *, param);
393 #define __NR_sys_getcpu __NR_getcpu
394 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
395 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
396           void *, arg);
397 _syscall2(int, capget, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 _syscall2(int, capset, struct __user_cap_header_struct *, header,
400           struct __user_cap_data_struct *, data);
401 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
402 _syscall2(int, ioprio_get, int, which, int, who)
403 #endif
404 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
405 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
406 #endif
407 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
408 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
409 #endif
410 
411 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
412 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
413           unsigned long, idx1, unsigned long, idx2)
414 #endif
415 
416 /*
417  * It is assumed that struct statx is architecture independent.
418  */
419 #if defined(TARGET_NR_statx) && defined(__NR_statx)
420 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
421           unsigned int, mask, struct target_statx *, statxbuf)
422 #endif
423 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
424 _syscall2(int, membarrier, int, cmd, int, flags)
425 #endif
426 
427 static const bitmask_transtbl fcntl_flags_tbl[] = {
428   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
429   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
430   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
431   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
432   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
433   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
434   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
435   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
436   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
437   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
438   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
439   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
440   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
441 #if defined(O_DIRECT)
442   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
443 #endif
444 #if defined(O_NOATIME)
445   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
446 #endif
447 #if defined(O_CLOEXEC)
448   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
449 #endif
450 #if defined(O_PATH)
451   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
452 #endif
453 #if defined(O_TMPFILE)
454   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
455 #endif
456   /* Don't terminate the list prematurely on 64-bit host+guest.  */
457 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
458   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
459 #endif
460 };
461 
462 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
463 
464 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
465 #if defined(__NR_utimensat)
466 #define __NR_sys_utimensat __NR_utimensat
467 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
468           const struct timespec *,tsp,int,flags)
469 #else
470 static int sys_utimensat(int dirfd, const char *pathname,
471                          const struct timespec times[2], int flags)
472 {
473     errno = ENOSYS;
474     return -1;
475 }
476 #endif
477 #endif /* TARGET_NR_utimensat */
478 
479 #ifdef TARGET_NR_renameat2
480 #if defined(__NR_renameat2)
481 #define __NR_sys_renameat2 __NR_renameat2
482 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
483           const char *, new, unsigned int, flags)
484 #else
485 static int sys_renameat2(int oldfd, const char *old,
486                          int newfd, const char *new, int flags)
487 {
488     if (flags == 0) {
489         return renameat(oldfd, old, newfd, new);
490     }
491     errno = ENOSYS;
492     return -1;
493 }
494 #endif
495 #endif /* TARGET_NR_renameat2 */
496 
497 #ifdef CONFIG_INOTIFY
498 #include <sys/inotify.h>
499 #else
500 /* Userspace can usually survive runtime without inotify */
501 #undef TARGET_NR_inotify_init
502 #undef TARGET_NR_inotify_init1
503 #undef TARGET_NR_inotify_add_watch
504 #undef TARGET_NR_inotify_rm_watch
505 #endif /* CONFIG_INOTIFY  */
506 
507 #if defined(TARGET_NR_prlimit64)
508 #ifndef __NR_prlimit64
509 # define __NR_prlimit64 -1
510 #endif
511 #define __NR_sys_prlimit64 __NR_prlimit64
512 /* The glibc rlimit structure may not be that used by the underlying syscall */
513 struct host_rlimit64 {
514     uint64_t rlim_cur;
515     uint64_t rlim_max;
516 };
517 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
518           const struct host_rlimit64 *, new_limit,
519           struct host_rlimit64 *, old_limit)
520 #endif
521 
522 
523 #if defined(TARGET_NR_timer_create)
524 /* Maximum of 32 active POSIX timers allowed at any one time. */
525 #define GUEST_TIMER_MAX 32
526 static timer_t g_posix_timers[GUEST_TIMER_MAX];
527 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
528 
next_free_host_timer(void)529 static inline int next_free_host_timer(void)
530 {
531     int k;
532     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
533         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
534             return k;
535         }
536     }
537     return -1;
538 }
539 
free_host_timer_slot(int id)540 static inline void free_host_timer_slot(int id)
541 {
542     qatomic_store_release(g_posix_timer_allocated + id, 0);
543 }
544 #endif
545 
host_to_target_errno(int host_errno)546 static inline int host_to_target_errno(int host_errno)
547 {
548     switch (host_errno) {
549 #define E(X)  case X: return TARGET_##X;
550 #include "errnos.c.inc"
551 #undef E
552     default:
553         return host_errno;
554     }
555 }
556 
target_to_host_errno(int target_errno)557 static inline int target_to_host_errno(int target_errno)
558 {
559     switch (target_errno) {
560 #define E(X)  case TARGET_##X: return X;
561 #include "errnos.c.inc"
562 #undef E
563     default:
564         return target_errno;
565     }
566 }
567 
get_errno(abi_long ret)568 abi_long get_errno(abi_long ret)
569 {
570     if (ret == -1)
571         return -host_to_target_errno(errno);
572     else
573         return ret;
574 }
575 
target_strerror(int err)576 const char *target_strerror(int err)
577 {
578     if (err == QEMU_ERESTARTSYS) {
579         return "To be restarted";
580     }
581     if (err == QEMU_ESIGRETURN) {
582         return "Successful exit from sigreturn";
583     }
584 
585     return strerror(target_to_host_errno(err));
586 }
587 
check_zeroed_user(abi_long addr,size_t ksize,size_t usize)588 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
589 {
590     int i;
591     uint8_t b;
592     if (usize <= ksize) {
593         return 1;
594     }
595     for (i = ksize; i < usize; i++) {
596         if (get_user_u8(b, addr + i)) {
597             return -TARGET_EFAULT;
598         }
599         if (b != 0) {
600             return 0;
601         }
602     }
603     return 1;
604 }
605 
606 /*
607  * Copies a target struct to a host struct, in a way that guarantees
608  * backwards-compatibility for struct syscall arguments.
609  *
610  * Similar to kernels uaccess.h:copy_struct_from_user()
611  */
copy_struct_from_user(void * dst,size_t ksize,abi_ptr src,size_t usize)612 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
613 {
614     size_t size = MIN(ksize, usize);
615     size_t rest = MAX(ksize, usize) - size;
616 
617     /* Deal with trailing bytes. */
618     if (usize < ksize) {
619         memset(dst + size, 0, rest);
620     } else if (usize > ksize) {
621         int ret = check_zeroed_user(src, ksize, usize);
622         if (ret <= 0) {
623             return ret ?: -TARGET_E2BIG;
624         }
625     }
626     /* Copy the interoperable parts of the struct. */
627     if (copy_from_user(dst, src, size)) {
628         return -TARGET_EFAULT;
629     }
630     return 0;
631 }
632 
633 #define safe_syscall0(type, name) \
634 static type safe_##name(void) \
635 { \
636     return safe_syscall(__NR_##name); \
637 }
638 
639 #define safe_syscall1(type, name, type1, arg1) \
640 static type safe_##name(type1 arg1) \
641 { \
642     return safe_syscall(__NR_##name, arg1); \
643 }
644 
645 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
646 static type safe_##name(type1 arg1, type2 arg2) \
647 { \
648     return safe_syscall(__NR_##name, arg1, arg2); \
649 }
650 
651 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
652 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
653 { \
654     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
655 }
656 
657 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
658     type4, arg4) \
659 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
660 { \
661     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
662 }
663 
664 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
665     type4, arg4, type5, arg5) \
666 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
667     type5 arg5) \
668 { \
669     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
670 }
671 
672 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
673     type4, arg4, type5, arg5, type6, arg6) \
674 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
675     type5 arg5, type6 arg6) \
676 { \
677     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
678 }
679 
safe_syscall3(ssize_t,read,int,fd,void *,buff,size_t,count)680 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
681 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
682 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
683               int, flags, mode_t, mode)
684 
685 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
686               const struct open_how_ver0 *, how, size_t, size)
687 
688 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
689 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
690               struct rusage *, rusage)
691 #endif
692 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
693               int, options, struct rusage *, rusage)
694 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
695 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
696               char **, argv, char **, envp, int, flags)
697 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
698     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
699 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
700               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
701 #endif
702 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
703 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
704               struct timespec *, tsp, const sigset_t *, sigmask,
705               size_t, sigsetsize)
706 #endif
707 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
708               int, maxevents, int, timeout, const sigset_t *, sigmask,
709               size_t, sigsetsize)
710 #if defined(__NR_futex)
711 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
712               const struct timespec *,timeout,int *,uaddr2,int,val3)
713 #endif
714 #if defined(__NR_futex_time64)
715 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
716               const struct timespec *,timeout,int *,uaddr2,int,val3)
717 #endif
718 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
719 safe_syscall2(int, kill, pid_t, pid, int, sig)
720 safe_syscall2(int, tkill, int, tid, int, sig)
721 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
722 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
723 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
724 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
725               unsigned long, pos_l, unsigned long, pos_h)
726 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
727               unsigned long, pos_l, unsigned long, pos_h)
728 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
729               socklen_t, addrlen)
730 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
731               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
732 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
733               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
734 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
735 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
736 safe_syscall2(int, flock, int, fd, int, operation)
737 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
738 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
739               const struct timespec *, uts, size_t, sigsetsize)
740 #endif
741 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
742               int, flags)
743 #if defined(TARGET_NR_nanosleep)
744 safe_syscall2(int, nanosleep, const struct timespec *, req,
745               struct timespec *, rem)
746 #endif
747 #if defined(TARGET_NR_clock_nanosleep) || \
748     defined(TARGET_NR_clock_nanosleep_time64)
749 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
750               const struct timespec *, req, struct timespec *, rem)
751 #endif
752 #ifdef __NR_ipc
753 #ifdef __s390x__
754 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
755               void *, ptr)
756 #else
757 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
758               void *, ptr, long, fifth)
759 #endif
760 #endif
761 #ifdef __NR_msgsnd
762 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
763               int, flags)
764 #endif
765 #ifdef __NR_msgrcv
766 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
767               long, msgtype, int, flags)
768 #endif
769 #ifdef __NR_semtimedop
770 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
771               unsigned, nsops, const struct timespec *, timeout)
772 #endif
773 #if defined(TARGET_NR_mq_timedsend) || \
774     defined(TARGET_NR_mq_timedsend_time64)
775 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
776               size_t, len, unsigned, prio, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedreceive) || \
779     defined(TARGET_NR_mq_timedreceive_time64)
780 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
781               size_t, len, unsigned *, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
784 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
785               int, outfd, loff_t *, poutoff, size_t, length,
786               unsigned int, flags)
787 #endif
788 
789 /* We do ioctl like this rather than via safe_syscall3 to preserve the
790  * "third argument might be integer or pointer or not present" behaviour of
791  * the libc function.
792  */
793 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
794 /* Similarly for fcntl. Since we always build with LFS enabled,
795  * we should be using the 64-bit structures automatically.
796  */
797 #ifdef __NR_fcntl64
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
799 #else
800 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
801 #endif
802 
803 static inline int host_to_target_sock_type(int host_type)
804 {
805     int target_type;
806 
807     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
808     case SOCK_DGRAM:
809         target_type = TARGET_SOCK_DGRAM;
810         break;
811     case SOCK_STREAM:
812         target_type = TARGET_SOCK_STREAM;
813         break;
814     default:
815         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
816         break;
817     }
818 
819 #if defined(SOCK_CLOEXEC)
820     if (host_type & SOCK_CLOEXEC) {
821         target_type |= TARGET_SOCK_CLOEXEC;
822     }
823 #endif
824 
825 #if defined(SOCK_NONBLOCK)
826     if (host_type & SOCK_NONBLOCK) {
827         target_type |= TARGET_SOCK_NONBLOCK;
828     }
829 #endif
830 
831     return target_type;
832 }
833 
834 static abi_ulong target_brk, initial_target_brk;
835 
target_set_brk(abi_ulong new_brk)836 void target_set_brk(abi_ulong new_brk)
837 {
838     target_brk = TARGET_PAGE_ALIGN(new_brk);
839     initial_target_brk = target_brk;
840 }
841 
842 /* do_brk() must return target values and target errnos. */
do_brk(abi_ulong brk_val)843 abi_long do_brk(abi_ulong brk_val)
844 {
845     abi_long mapped_addr;
846     abi_ulong new_brk;
847     abi_ulong old_brk;
848 
849     /* brk pointers are always untagged */
850 
851     /* do not allow to shrink below initial brk value */
852     if (brk_val < initial_target_brk) {
853         return target_brk;
854     }
855 
856     new_brk = TARGET_PAGE_ALIGN(brk_val);
857     old_brk = TARGET_PAGE_ALIGN(target_brk);
858 
859     /* new and old target_brk might be on the same page */
860     if (new_brk == old_brk) {
861         target_brk = brk_val;
862         return target_brk;
863     }
864 
865     /* Release heap if necessary */
866     if (new_brk < old_brk) {
867         target_munmap(new_brk, old_brk - new_brk);
868 
869         target_brk = brk_val;
870         return target_brk;
871     }
872 
873     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
874                               PROT_READ | PROT_WRITE,
875                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
876                               -1, 0);
877 
878     if (mapped_addr == old_brk) {
879         target_brk = brk_val;
880         return target_brk;
881     }
882 
883 #if defined(TARGET_ALPHA)
884     /* We (partially) emulate OSF/1 on Alpha, which requires we
885        return a proper errno, not an unchanged brk value.  */
886     return -TARGET_ENOMEM;
887 #endif
888     /* For everything else, return the previous break. */
889     return target_brk;
890 }
891 
892 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
893     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
copy_from_user_fdset(fd_set * fds,abi_ulong target_fds_addr,int n)894 static inline abi_long copy_from_user_fdset(fd_set *fds,
895                                             abi_ulong target_fds_addr,
896                                             int n)
897 {
898     int i, nw, j, k;
899     abi_ulong b, *target_fds;
900 
901     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
902     if (!(target_fds = lock_user(VERIFY_READ,
903                                  target_fds_addr,
904                                  sizeof(abi_ulong) * nw,
905                                  1)))
906         return -TARGET_EFAULT;
907 
908     FD_ZERO(fds);
909     k = 0;
910     for (i = 0; i < nw; i++) {
911         /* grab the abi_ulong */
912         __get_user(b, &target_fds[i]);
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             /* check the bit inside the abi_ulong */
915             if ((b >> j) & 1)
916                 FD_SET(k, fds);
917             k++;
918         }
919     }
920 
921     unlock_user(target_fds, target_fds_addr, 0);
922 
923     return 0;
924 }
925 
copy_from_user_fdset_ptr(fd_set * fds,fd_set ** fds_ptr,abi_ulong target_fds_addr,int n)926 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
927                                                  abi_ulong target_fds_addr,
928                                                  int n)
929 {
930     if (target_fds_addr) {
931         if (copy_from_user_fdset(fds, target_fds_addr, n))
932             return -TARGET_EFAULT;
933         *fds_ptr = fds;
934     } else {
935         *fds_ptr = NULL;
936     }
937     return 0;
938 }
939 
copy_to_user_fdset(abi_ulong target_fds_addr,const fd_set * fds,int n)940 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
941                                           const fd_set *fds,
942                                           int n)
943 {
944     int i, nw, j, k;
945     abi_long v;
946     abi_ulong *target_fds;
947 
948     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
949     if (!(target_fds = lock_user(VERIFY_WRITE,
950                                  target_fds_addr,
951                                  sizeof(abi_ulong) * nw,
952                                  0)))
953         return -TARGET_EFAULT;
954 
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         v = 0;
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
960             k++;
961         }
962         __put_user(v, &target_fds[i]);
963     }
964 
965     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
966 
967     return 0;
968 }
969 #endif
970 
971 #if defined(__alpha__)
972 #define HOST_HZ 1024
973 #else
974 #define HOST_HZ 100
975 #endif
976 
host_to_target_clock_t(long ticks)977 static inline abi_long host_to_target_clock_t(long ticks)
978 {
979 #if HOST_HZ == TARGET_HZ
980     return ticks;
981 #else
982     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
983 #endif
984 }
985 
host_to_target_rusage(abi_ulong target_addr,const struct rusage * rusage)986 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
987                                              const struct rusage *rusage)
988 {
989     struct target_rusage *target_rusage;
990 
991     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
992         return -TARGET_EFAULT;
993     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
994     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
995     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
996     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
997     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
998     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
999     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1000     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1001     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1002     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1003     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1004     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1005     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1006     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1007     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1008     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1009     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1010     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1011     unlock_user_struct(target_rusage, target_addr, 1);
1012 
1013     return 0;
1014 }
1015 
1016 #ifdef TARGET_NR_setrlimit
target_to_host_rlim(abi_ulong target_rlim)1017 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1018 {
1019     abi_ulong target_rlim_swap;
1020     rlim_t result;
1021 
1022     target_rlim_swap = tswapal(target_rlim);
1023     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1024         return RLIM_INFINITY;
1025 
1026     result = target_rlim_swap;
1027     if (target_rlim_swap != (rlim_t)result)
1028         return RLIM_INFINITY;
1029 
1030     return result;
1031 }
1032 #endif
1033 
1034 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
host_to_target_rlim(rlim_t rlim)1035 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1036 {
1037     abi_ulong target_rlim_swap;
1038     abi_ulong result;
1039 
1040     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1041         target_rlim_swap = TARGET_RLIM_INFINITY;
1042     else
1043         target_rlim_swap = rlim;
1044     result = tswapal(target_rlim_swap);
1045 
1046     return result;
1047 }
1048 #endif
1049 
target_to_host_resource(int code)1050 static inline int target_to_host_resource(int code)
1051 {
1052     switch (code) {
1053     case TARGET_RLIMIT_AS:
1054         return RLIMIT_AS;
1055     case TARGET_RLIMIT_CORE:
1056         return RLIMIT_CORE;
1057     case TARGET_RLIMIT_CPU:
1058         return RLIMIT_CPU;
1059     case TARGET_RLIMIT_DATA:
1060         return RLIMIT_DATA;
1061     case TARGET_RLIMIT_FSIZE:
1062         return RLIMIT_FSIZE;
1063     case TARGET_RLIMIT_LOCKS:
1064         return RLIMIT_LOCKS;
1065     case TARGET_RLIMIT_MEMLOCK:
1066         return RLIMIT_MEMLOCK;
1067     case TARGET_RLIMIT_MSGQUEUE:
1068         return RLIMIT_MSGQUEUE;
1069     case TARGET_RLIMIT_NICE:
1070         return RLIMIT_NICE;
1071     case TARGET_RLIMIT_NOFILE:
1072         return RLIMIT_NOFILE;
1073     case TARGET_RLIMIT_NPROC:
1074         return RLIMIT_NPROC;
1075     case TARGET_RLIMIT_RSS:
1076         return RLIMIT_RSS;
1077     case TARGET_RLIMIT_RTPRIO:
1078         return RLIMIT_RTPRIO;
1079 #ifdef RLIMIT_RTTIME
1080     case TARGET_RLIMIT_RTTIME:
1081         return RLIMIT_RTTIME;
1082 #endif
1083     case TARGET_RLIMIT_SIGPENDING:
1084         return RLIMIT_SIGPENDING;
1085     case TARGET_RLIMIT_STACK:
1086         return RLIMIT_STACK;
1087     default:
1088         return code;
1089     }
1090 }
1091 
copy_from_user_timeval(struct timeval * tv,abi_ulong target_tv_addr)1092 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1093                                               abi_ulong target_tv_addr)
1094 {
1095     struct target_timeval *target_tv;
1096 
1097     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1098         return -TARGET_EFAULT;
1099     }
1100 
1101     __get_user(tv->tv_sec, &target_tv->tv_sec);
1102     __get_user(tv->tv_usec, &target_tv->tv_usec);
1103 
1104     unlock_user_struct(target_tv, target_tv_addr, 0);
1105 
1106     return 0;
1107 }
1108 
copy_to_user_timeval(abi_ulong target_tv_addr,const struct timeval * tv)1109 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1110                                             const struct timeval *tv)
1111 {
1112     struct target_timeval *target_tv;
1113 
1114     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1115         return -TARGET_EFAULT;
1116     }
1117 
1118     __put_user(tv->tv_sec, &target_tv->tv_sec);
1119     __put_user(tv->tv_usec, &target_tv->tv_usec);
1120 
1121     unlock_user_struct(target_tv, target_tv_addr, 1);
1122 
1123     return 0;
1124 }
1125 
1126 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
copy_from_user_timeval64(struct timeval * tv,abi_ulong target_tv_addr)1127 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1128                                                 abi_ulong target_tv_addr)
1129 {
1130     struct target__kernel_sock_timeval *target_tv;
1131 
1132     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1133         return -TARGET_EFAULT;
1134     }
1135 
1136     __get_user(tv->tv_sec, &target_tv->tv_sec);
1137     __get_user(tv->tv_usec, &target_tv->tv_usec);
1138 
1139     unlock_user_struct(target_tv, target_tv_addr, 0);
1140 
1141     return 0;
1142 }
1143 #endif
1144 
copy_to_user_timeval64(abi_ulong target_tv_addr,const struct timeval * tv)1145 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1146                                               const struct timeval *tv)
1147 {
1148     struct target__kernel_sock_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
1162 #if defined(TARGET_NR_futex) || \
1163     defined(TARGET_NR_rt_sigtimedwait) || \
1164     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1165     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1166     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1167     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1168     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1169     defined(TARGET_NR_timer_settime) || \
1170     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
target_to_host_timespec(struct timespec * host_ts,abi_ulong target_addr)1171 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1172                                                abi_ulong target_addr)
1173 {
1174     struct target_timespec *target_ts;
1175 
1176     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1177         return -TARGET_EFAULT;
1178     }
1179     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1180     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1181     unlock_user_struct(target_ts, target_addr, 0);
1182     return 0;
1183 }
1184 #endif
1185 
1186 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1187     defined(TARGET_NR_timer_settime64) || \
1188     defined(TARGET_NR_mq_timedsend_time64) || \
1189     defined(TARGET_NR_mq_timedreceive_time64) || \
1190     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1191     defined(TARGET_NR_clock_nanosleep_time64) || \
1192     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1193     defined(TARGET_NR_utimensat) || \
1194     defined(TARGET_NR_utimensat_time64) || \
1195     defined(TARGET_NR_semtimedop_time64) || \
1196     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
target_to_host_timespec64(struct timespec * host_ts,abi_ulong target_addr)1197 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1198                                                  abi_ulong target_addr)
1199 {
1200     struct target__kernel_timespec *target_ts;
1201 
1202     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1203         return -TARGET_EFAULT;
1204     }
1205     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1206     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207     /* in 32bit mode, this drops the padding */
1208     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1209     unlock_user_struct(target_ts, target_addr, 0);
1210     return 0;
1211 }
1212 #endif
1213 
host_to_target_timespec(abi_ulong target_addr,struct timespec * host_ts)1214 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1215                                                struct timespec *host_ts)
1216 {
1217     struct target_timespec *target_ts;
1218 
1219     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1220         return -TARGET_EFAULT;
1221     }
1222     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1223     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1224     unlock_user_struct(target_ts, target_addr, 1);
1225     return 0;
1226 }
1227 
host_to_target_timespec64(abi_ulong target_addr,struct timespec * host_ts)1228 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1229                                                  struct timespec *host_ts)
1230 {
1231     struct target__kernel_timespec *target_ts;
1232 
1233     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1234         return -TARGET_EFAULT;
1235     }
1236     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1237     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1238     unlock_user_struct(target_ts, target_addr, 1);
1239     return 0;
1240 }
1241 
1242 #if defined(TARGET_NR_gettimeofday)
copy_to_user_timezone(abi_ulong target_tz_addr,struct timezone * tz)1243 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1244                                              struct timezone *tz)
1245 {
1246     struct target_timezone *target_tz;
1247 
1248     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1249         return -TARGET_EFAULT;
1250     }
1251 
1252     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1253     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1254 
1255     unlock_user_struct(target_tz, target_tz_addr, 1);
1256 
1257     return 0;
1258 }
1259 #endif
1260 
1261 #if defined(TARGET_NR_settimeofday)
copy_from_user_timezone(struct timezone * tz,abi_ulong target_tz_addr)1262 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1263                                                abi_ulong target_tz_addr)
1264 {
1265     struct target_timezone *target_tz;
1266 
1267     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1268         return -TARGET_EFAULT;
1269     }
1270 
1271     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1272     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1273 
1274     unlock_user_struct(target_tz, target_tz_addr, 0);
1275 
1276     return 0;
1277 }
1278 #endif
1279 
1280 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1281 #include <mqueue.h>
1282 
copy_from_user_mq_attr(struct mq_attr * attr,abi_ulong target_mq_attr_addr)1283 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1284                                               abi_ulong target_mq_attr_addr)
1285 {
1286     struct target_mq_attr *target_mq_attr;
1287 
1288     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1289                           target_mq_attr_addr, 1))
1290         return -TARGET_EFAULT;
1291 
1292     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1293     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1294     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1295     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1296 
1297     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1298 
1299     return 0;
1300 }
1301 
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,const struct mq_attr * attr)1302 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1303                                             const struct mq_attr *attr)
1304 {
1305     struct target_mq_attr *target_mq_attr;
1306 
1307     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1308                           target_mq_attr_addr, 0))
1309         return -TARGET_EFAULT;
1310 
1311     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1312     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1313     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1314     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1315 
1316     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1317 
1318     return 0;
1319 }
1320 #endif
1321 
1322 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1323 /* do_select() must return target values and target errnos. */
do_select(int n,abi_ulong rfd_addr,abi_ulong wfd_addr,abi_ulong efd_addr,abi_ulong target_tv_addr)1324 static abi_long do_select(int n,
1325                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1326                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1327 {
1328     fd_set rfds, wfds, efds;
1329     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1330     struct timeval tv;
1331     struct timespec ts, *ts_ptr;
1332     abi_long ret;
1333 
1334     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1335     if (ret) {
1336         return ret;
1337     }
1338     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346 
1347     if (target_tv_addr) {
1348         if (copy_from_user_timeval(&tv, target_tv_addr))
1349             return -TARGET_EFAULT;
1350         ts.tv_sec = tv.tv_sec;
1351         ts.tv_nsec = tv.tv_usec * 1000;
1352         ts_ptr = &ts;
1353     } else {
1354         ts_ptr = NULL;
1355     }
1356 
1357     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1358                                   ts_ptr, NULL));
1359 
1360     if (!is_error(ret)) {
1361         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1362             return -TARGET_EFAULT;
1363         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1364             return -TARGET_EFAULT;
1365         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1366             return -TARGET_EFAULT;
1367 
1368         if (target_tv_addr) {
1369             tv.tv_sec = ts.tv_sec;
1370             tv.tv_usec = ts.tv_nsec / 1000;
1371             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1372                 return -TARGET_EFAULT;
1373             }
1374         }
1375     }
1376 
1377     return ret;
1378 }
1379 
1380 #if defined(TARGET_WANT_OLD_SYS_SELECT)
do_old_select(abi_ulong arg1)1381 static abi_long do_old_select(abi_ulong arg1)
1382 {
1383     struct target_sel_arg_struct *sel;
1384     abi_ulong inp, outp, exp, tvp;
1385     long nsel;
1386 
1387     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1388         return -TARGET_EFAULT;
1389     }
1390 
1391     nsel = tswapal(sel->n);
1392     inp = tswapal(sel->inp);
1393     outp = tswapal(sel->outp);
1394     exp = tswapal(sel->exp);
1395     tvp = tswapal(sel->tvp);
1396 
1397     unlock_user_struct(sel, arg1, 0);
1398 
1399     return do_select(nsel, inp, outp, exp, tvp);
1400 }
1401 #endif
1402 #endif
1403 
1404 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
do_pselect6(abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5,abi_long arg6,bool time64)1405 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1406                             abi_long arg4, abi_long arg5, abi_long arg6,
1407                             bool time64)
1408 {
1409     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1410     fd_set rfds, wfds, efds;
1411     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1412     struct timespec ts, *ts_ptr;
1413     abi_long ret;
1414 
1415     /*
1416      * The 6th arg is actually two args smashed together,
1417      * so we cannot use the C library.
1418      */
1419     struct {
1420         sigset_t *set;
1421         size_t size;
1422     } sig, *sig_ptr;
1423 
1424     abi_ulong arg_sigset, arg_sigsize, *arg7;
1425 
1426     n = arg1;
1427     rfd_addr = arg2;
1428     wfd_addr = arg3;
1429     efd_addr = arg4;
1430     ts_addr = arg5;
1431 
1432     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1433     if (ret) {
1434         return ret;
1435     }
1436     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444 
1445     /*
1446      * This takes a timespec, and not a timeval, so we cannot
1447      * use the do_select() helper ...
1448      */
1449     if (ts_addr) {
1450         if (time64) {
1451             if (target_to_host_timespec64(&ts, ts_addr)) {
1452                 return -TARGET_EFAULT;
1453             }
1454         } else {
1455             if (target_to_host_timespec(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         }
1459             ts_ptr = &ts;
1460     } else {
1461         ts_ptr = NULL;
1462     }
1463 
1464     /* Extract the two packed args for the sigset */
1465     sig_ptr = NULL;
1466     if (arg6) {
1467         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1468         if (!arg7) {
1469             return -TARGET_EFAULT;
1470         }
1471         arg_sigset = tswapal(arg7[0]);
1472         arg_sigsize = tswapal(arg7[1]);
1473         unlock_user(arg7, arg6, 0);
1474 
1475         if (arg_sigset) {
1476             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1477             if (ret != 0) {
1478                 return ret;
1479             }
1480             sig_ptr = &sig;
1481             sig.size = SIGSET_T_SIZE;
1482         }
1483     }
1484 
1485     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1486                                   ts_ptr, sig_ptr));
1487 
1488     if (sig_ptr) {
1489         finish_sigsuspend_mask(ret);
1490     }
1491 
1492     if (!is_error(ret)) {
1493         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1494             return -TARGET_EFAULT;
1495         }
1496         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1497             return -TARGET_EFAULT;
1498         }
1499         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1500             return -TARGET_EFAULT;
1501         }
1502         if (time64) {
1503             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1504                 return -TARGET_EFAULT;
1505             }
1506         } else {
1507             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         }
1511     }
1512     return ret;
1513 }
1514 #endif
1515 
1516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1517     defined(TARGET_NR_ppoll_time64)
do_ppoll(abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5,bool ppoll,bool time64)1518 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1519                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1520 {
1521     struct target_pollfd *target_pfd;
1522     unsigned int nfds = arg2;
1523     struct pollfd *pfd;
1524     unsigned int i;
1525     abi_long ret;
1526 
1527     pfd = NULL;
1528     target_pfd = NULL;
1529     if (nfds) {
1530         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1531             return -TARGET_EINVAL;
1532         }
1533         target_pfd = lock_user(VERIFY_WRITE, arg1,
1534                                sizeof(struct target_pollfd) * nfds, 1);
1535         if (!target_pfd) {
1536             return -TARGET_EFAULT;
1537         }
1538 
1539         pfd = alloca(sizeof(struct pollfd) * nfds);
1540         for (i = 0; i < nfds; i++) {
1541             pfd[i].fd = tswap32(target_pfd[i].fd);
1542             pfd[i].events = tswap16(target_pfd[i].events);
1543         }
1544     }
1545     if (ppoll) {
1546         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1547         sigset_t *set = NULL;
1548 
1549         if (arg3) {
1550             if (time64) {
1551                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1552                     unlock_user(target_pfd, arg1, 0);
1553                     return -TARGET_EFAULT;
1554                 }
1555             } else {
1556                 if (target_to_host_timespec(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         } else {
1562             timeout_ts = NULL;
1563         }
1564 
1565         if (arg4) {
1566             ret = process_sigsuspend_mask(&set, arg4, arg5);
1567             if (ret != 0) {
1568                 unlock_user(target_pfd, arg1, 0);
1569                 return ret;
1570             }
1571         }
1572 
1573         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1574                                    set, SIGSET_T_SIZE));
1575 
1576         if (set) {
1577             finish_sigsuspend_mask(ret);
1578         }
1579         if (!is_error(ret) && arg3) {
1580             if (time64) {
1581                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1582                     return -TARGET_EFAULT;
1583                 }
1584             } else {
1585                 if (host_to_target_timespec(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             }
1589         }
1590     } else {
1591           struct timespec ts, *pts;
1592 
1593           if (arg3 >= 0) {
1594               /* Convert ms to secs, ns */
1595               ts.tv_sec = arg3 / 1000;
1596               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1597               pts = &ts;
1598           } else {
1599               /* -ve poll() timeout means "infinite" */
1600               pts = NULL;
1601           }
1602           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1603     }
1604 
1605     if (!is_error(ret)) {
1606         for (i = 0; i < nfds; i++) {
1607             target_pfd[i].revents = tswap16(pfd[i].revents);
1608         }
1609     }
1610     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1611     return ret;
1612 }
1613 #endif
1614 
do_pipe(CPUArchState * cpu_env,abi_ulong pipedes,int flags,int is_pipe2)1615 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1616                         int flags, int is_pipe2)
1617 {
1618     int host_pipe[2];
1619     abi_long ret;
1620     ret = pipe2(host_pipe, flags);
1621 
1622     if (is_error(ret))
1623         return get_errno(ret);
1624 
1625     /* Several targets have special calling conventions for the original
1626        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1627     if (!is_pipe2) {
1628 #if defined(TARGET_ALPHA)
1629         cpu_env->ir[IR_A4] = host_pipe[1];
1630         return host_pipe[0];
1631 #elif defined(TARGET_MIPS)
1632         cpu_env->active_tc.gpr[3] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_SH4)
1635         cpu_env->gregs[1] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SPARC)
1638         cpu_env->regwptr[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #endif
1641     }
1642 
1643     if (put_user_s32(host_pipe[0], pipedes)
1644         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1645         return -TARGET_EFAULT;
1646     return get_errno(ret);
1647 }
1648 
target_to_host_sockaddr(int fd,struct sockaddr * addr,abi_ulong target_addr,socklen_t len)1649 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1650                                                abi_ulong target_addr,
1651                                                socklen_t len)
1652 {
1653     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1654     sa_family_t sa_family;
1655     struct target_sockaddr *target_saddr;
1656 
1657     if (fd_trans_target_to_host_addr(fd)) {
1658         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1659     }
1660 
1661     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1662     if (!target_saddr)
1663         return -TARGET_EFAULT;
1664 
1665     sa_family = tswap16(target_saddr->sa_family);
1666 
1667     /* Oops. The caller might send a incomplete sun_path; sun_path
1668      * must be terminated by \0 (see the manual page), but
1669      * unfortunately it is quite common to specify sockaddr_un
1670      * length as "strlen(x->sun_path)" while it should be
1671      * "strlen(...) + 1". We'll fix that here if needed.
1672      * Linux kernel has a similar feature.
1673      */
1674 
1675     if (sa_family == AF_UNIX) {
1676         if (len < unix_maxlen && len > 0) {
1677             char *cp = (char*)target_saddr;
1678 
1679             if ( cp[len-1] && !cp[len] )
1680                 len++;
1681         }
1682         if (len > unix_maxlen)
1683             len = unix_maxlen;
1684     }
1685 
1686     memcpy(addr, target_saddr, len);
1687     addr->sa_family = sa_family;
1688     if (sa_family == AF_NETLINK) {
1689         struct sockaddr_nl *nladdr;
1690 
1691         nladdr = (struct sockaddr_nl *)addr;
1692         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1693         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1694     } else if (sa_family == AF_PACKET) {
1695 	struct target_sockaddr_ll *lladdr;
1696 
1697 	lladdr = (struct target_sockaddr_ll *)addr;
1698 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1699 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1700     } else if (sa_family == AF_INET6) {
1701         struct sockaddr_in6 *in6addr;
1702 
1703         in6addr = (struct sockaddr_in6 *)addr;
1704         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1705     }
1706     unlock_user(target_saddr, target_addr, 0);
1707 
1708     return 0;
1709 }
1710 
host_to_target_sockaddr(abi_ulong target_addr,struct sockaddr * addr,socklen_t len)1711 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1712                                                struct sockaddr *addr,
1713                                                socklen_t len)
1714 {
1715     struct target_sockaddr *target_saddr;
1716 
1717     if (len == 0) {
1718         return 0;
1719     }
1720     assert(addr);
1721 
1722     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1723     if (!target_saddr)
1724         return -TARGET_EFAULT;
1725     memcpy(target_saddr, addr, len);
1726     if (len >= offsetof(struct target_sockaddr, sa_family) +
1727         sizeof(target_saddr->sa_family)) {
1728         target_saddr->sa_family = tswap16(addr->sa_family);
1729     }
1730     if (addr->sa_family == AF_NETLINK &&
1731         len >= sizeof(struct target_sockaddr_nl)) {
1732         struct target_sockaddr_nl *target_nl =
1733                (struct target_sockaddr_nl *)target_saddr;
1734         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1735         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1736     } else if (addr->sa_family == AF_PACKET) {
1737         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1738         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1739         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1740     } else if (addr->sa_family == AF_INET6 &&
1741                len >= sizeof(struct target_sockaddr_in6)) {
1742         struct target_sockaddr_in6 *target_in6 =
1743                (struct target_sockaddr_in6 *)target_saddr;
1744         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1745     }
1746     unlock_user(target_saddr, target_addr, len);
1747 
1748     return 0;
1749 }
1750 
target_to_host_cmsg(struct msghdr * msgh,struct target_msghdr * target_msgh)1751 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1752                                            struct target_msghdr *target_msgh)
1753 {
1754     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1755     abi_long msg_controllen;
1756     abi_ulong target_cmsg_addr;
1757     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1758     socklen_t space = 0;
1759 
1760     msg_controllen = tswapal(target_msgh->msg_controllen);
1761     if (msg_controllen < sizeof (struct target_cmsghdr))
1762         goto the_end;
1763     target_cmsg_addr = tswapal(target_msgh->msg_control);
1764     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1765     target_cmsg_start = target_cmsg;
1766     if (!target_cmsg)
1767         return -TARGET_EFAULT;
1768 
1769     while (cmsg && target_cmsg) {
1770         void *data = CMSG_DATA(cmsg);
1771         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1772 
1773         int len = tswapal(target_cmsg->cmsg_len)
1774             - sizeof(struct target_cmsghdr);
1775 
1776         space += CMSG_SPACE(len);
1777         if (space > msgh->msg_controllen) {
1778             space -= CMSG_SPACE(len);
1779             /* This is a QEMU bug, since we allocated the payload
1780              * area ourselves (unlike overflow in host-to-target
1781              * conversion, which is just the guest giving us a buffer
1782              * that's too small). It can't happen for the payload types
1783              * we currently support; if it becomes an issue in future
1784              * we would need to improve our allocation strategy to
1785              * something more intelligent than "twice the size of the
1786              * target buffer we're reading from".
1787              */
1788             qemu_log_mask(LOG_UNIMP,
1789                           ("Unsupported ancillary data %d/%d: "
1790                            "unhandled msg size\n"),
1791                           tswap32(target_cmsg->cmsg_level),
1792                           tswap32(target_cmsg->cmsg_type));
1793             break;
1794         }
1795 
1796         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1797             cmsg->cmsg_level = SOL_SOCKET;
1798         } else {
1799             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1800         }
1801         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1802         cmsg->cmsg_len = CMSG_LEN(len);
1803 
1804         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1805             int *fd = (int *)data;
1806             int *target_fd = (int *)target_data;
1807             int i, numfds = len / sizeof(int);
1808 
1809             for (i = 0; i < numfds; i++) {
1810                 __get_user(fd[i], target_fd + i);
1811             }
1812         } else if (cmsg->cmsg_level == SOL_SOCKET
1813                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1814             struct ucred *cred = (struct ucred *)data;
1815             struct target_ucred *target_cred =
1816                 (struct target_ucred *)target_data;
1817 
1818             __get_user(cred->pid, &target_cred->pid);
1819             __get_user(cred->uid, &target_cred->uid);
1820             __get_user(cred->gid, &target_cred->gid);
1821         } else if (cmsg->cmsg_level == SOL_ALG) {
1822             uint32_t *dst = (uint32_t *)data;
1823 
1824             memcpy(dst, target_data, len);
1825             /* fix endianness of first 32-bit word */
1826             if (len >= sizeof(uint32_t)) {
1827                 *dst = tswap32(*dst);
1828             }
1829         } else {
1830             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1831                           cmsg->cmsg_level, cmsg->cmsg_type);
1832             memcpy(data, target_data, len);
1833         }
1834 
1835         cmsg = CMSG_NXTHDR(msgh, cmsg);
1836         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1837                                          target_cmsg_start);
1838     }
1839     unlock_user(target_cmsg, target_cmsg_addr, 0);
1840  the_end:
1841     msgh->msg_controllen = space;
1842     return 0;
1843 }
1844 
host_to_target_cmsg(struct target_msghdr * target_msgh,struct msghdr * msgh)1845 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1846                                            struct msghdr *msgh)
1847 {
1848     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1849     abi_long msg_controllen;
1850     abi_ulong target_cmsg_addr;
1851     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1852     socklen_t space = 0;
1853 
1854     msg_controllen = tswapal(target_msgh->msg_controllen);
1855     if (msg_controllen < sizeof (struct target_cmsghdr))
1856         goto the_end;
1857     target_cmsg_addr = tswapal(target_msgh->msg_control);
1858     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1859     target_cmsg_start = target_cmsg;
1860     if (!target_cmsg)
1861         return -TARGET_EFAULT;
1862 
1863     while (cmsg && target_cmsg) {
1864         void *data = CMSG_DATA(cmsg);
1865         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1866 
1867         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1868         int tgt_len, tgt_space;
1869 
1870         /* We never copy a half-header but may copy half-data;
1871          * this is Linux's behaviour in put_cmsg(). Note that
1872          * truncation here is a guest problem (which we report
1873          * to the guest via the CTRUNC bit), unlike truncation
1874          * in target_to_host_cmsg, which is a QEMU bug.
1875          */
1876         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1877             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1878             break;
1879         }
1880 
1881         if (cmsg->cmsg_level == SOL_SOCKET) {
1882             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1883         } else {
1884             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1885         }
1886         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1887 
1888         /* Payload types which need a different size of payload on
1889          * the target must adjust tgt_len here.
1890          */
1891         tgt_len = len;
1892         switch (cmsg->cmsg_level) {
1893         case SOL_SOCKET:
1894             switch (cmsg->cmsg_type) {
1895             case SO_TIMESTAMP:
1896                 tgt_len = sizeof(struct target_timeval);
1897                 break;
1898             default:
1899                 break;
1900             }
1901             break;
1902         default:
1903             break;
1904         }
1905 
1906         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1907             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1908             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1909         }
1910 
1911         /* We must now copy-and-convert len bytes of payload
1912          * into tgt_len bytes of destination space. Bear in mind
1913          * that in both source and destination we may be dealing
1914          * with a truncated value!
1915          */
1916         switch (cmsg->cmsg_level) {
1917         case SOL_SOCKET:
1918             switch (cmsg->cmsg_type) {
1919             case SCM_RIGHTS:
1920             {
1921                 int *fd = (int *)data;
1922                 int *target_fd = (int *)target_data;
1923                 int i, numfds = tgt_len / sizeof(int);
1924 
1925                 for (i = 0; i < numfds; i++) {
1926                     __put_user(fd[i], target_fd + i);
1927                 }
1928                 break;
1929             }
1930             case SO_TIMESTAMP:
1931             {
1932                 struct timeval *tv = (struct timeval *)data;
1933                 struct target_timeval *target_tv =
1934                     (struct target_timeval *)target_data;
1935 
1936                 if (len != sizeof(struct timeval) ||
1937                     tgt_len != sizeof(struct target_timeval)) {
1938                     goto unimplemented;
1939                 }
1940 
1941                 /* copy struct timeval to target */
1942                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1943                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1944                 break;
1945             }
1946             case SCM_CREDENTIALS:
1947             {
1948                 struct ucred *cred = (struct ucred *)data;
1949                 struct target_ucred *target_cred =
1950                     (struct target_ucred *)target_data;
1951 
1952                 __put_user(cred->pid, &target_cred->pid);
1953                 __put_user(cred->uid, &target_cred->uid);
1954                 __put_user(cred->gid, &target_cred->gid);
1955                 break;
1956             }
1957             default:
1958                 goto unimplemented;
1959             }
1960             break;
1961 
1962         case SOL_IP:
1963             switch (cmsg->cmsg_type) {
1964             case IP_TTL:
1965             {
1966                 uint32_t *v = (uint32_t *)data;
1967                 uint32_t *t_int = (uint32_t *)target_data;
1968 
1969                 if (len != sizeof(uint32_t) ||
1970                     tgt_len != sizeof(uint32_t)) {
1971                     goto unimplemented;
1972                 }
1973                 __put_user(*v, t_int);
1974                 break;
1975             }
1976             case IP_RECVERR:
1977             {
1978                 struct errhdr_t {
1979                    struct sock_extended_err ee;
1980                    struct sockaddr_in offender;
1981                 };
1982                 struct errhdr_t *errh = (struct errhdr_t *)data;
1983                 struct errhdr_t *target_errh =
1984                     (struct errhdr_t *)target_data;
1985 
1986                 if (len != sizeof(struct errhdr_t) ||
1987                     tgt_len != sizeof(struct errhdr_t)) {
1988                     goto unimplemented;
1989                 }
1990                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1991                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1992                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1993                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1994                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1995                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1996                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1997                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1998                     (void *) &errh->offender, sizeof(errh->offender));
1999                 break;
2000             }
2001             default:
2002                 goto unimplemented;
2003             }
2004             break;
2005 
2006         case SOL_IPV6:
2007             switch (cmsg->cmsg_type) {
2008             case IPV6_HOPLIMIT:
2009             {
2010                 uint32_t *v = (uint32_t *)data;
2011                 uint32_t *t_int = (uint32_t *)target_data;
2012 
2013                 if (len != sizeof(uint32_t) ||
2014                     tgt_len != sizeof(uint32_t)) {
2015                     goto unimplemented;
2016                 }
2017                 __put_user(*v, t_int);
2018                 break;
2019             }
2020             case IPV6_RECVERR:
2021             {
2022                 struct errhdr6_t {
2023                    struct sock_extended_err ee;
2024                    struct sockaddr_in6 offender;
2025                 };
2026                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2027                 struct errhdr6_t *target_errh =
2028                     (struct errhdr6_t *)target_data;
2029 
2030                 if (len != sizeof(struct errhdr6_t) ||
2031                     tgt_len != sizeof(struct errhdr6_t)) {
2032                     goto unimplemented;
2033                 }
2034                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2035                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2036                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2037                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2038                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2039                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2040                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2041                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2042                     (void *) &errh->offender, sizeof(errh->offender));
2043                 break;
2044             }
2045             default:
2046                 goto unimplemented;
2047             }
2048             break;
2049 
2050         default:
2051         unimplemented:
2052             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2053                           cmsg->cmsg_level, cmsg->cmsg_type);
2054             memcpy(target_data, data, MIN(len, tgt_len));
2055             if (tgt_len > len) {
2056                 memset(target_data + len, 0, tgt_len - len);
2057             }
2058         }
2059 
2060         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2061         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2062         if (msg_controllen < tgt_space) {
2063             tgt_space = msg_controllen;
2064         }
2065         msg_controllen -= tgt_space;
2066         space += tgt_space;
2067         cmsg = CMSG_NXTHDR(msgh, cmsg);
2068         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2069                                          target_cmsg_start);
2070     }
2071     unlock_user(target_cmsg, target_cmsg_addr, space);
2072  the_end:
2073     target_msgh->msg_controllen = tswapal(space);
2074     return 0;
2075 }
2076 
2077 /* do_setsockopt() Must return target values and target errnos. */
do_setsockopt(int sockfd,int level,int optname,abi_ulong optval_addr,socklen_t optlen)2078 static abi_long do_setsockopt(int sockfd, int level, int optname,
2079                               abi_ulong optval_addr, socklen_t optlen)
2080 {
2081     abi_long ret;
2082     int val;
2083 
2084     switch(level) {
2085     case SOL_TCP:
2086     case SOL_UDP:
2087         /* TCP and UDP options all take an 'int' value.  */
2088         if (optlen < sizeof(uint32_t))
2089             return -TARGET_EINVAL;
2090 
2091         if (get_user_u32(val, optval_addr))
2092             return -TARGET_EFAULT;
2093         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2094         break;
2095     case SOL_IP:
2096         switch(optname) {
2097         case IP_TOS:
2098         case IP_TTL:
2099         case IP_HDRINCL:
2100         case IP_ROUTER_ALERT:
2101         case IP_RECVOPTS:
2102         case IP_RETOPTS:
2103         case IP_PKTINFO:
2104         case IP_MTU_DISCOVER:
2105         case IP_RECVERR:
2106         case IP_RECVTTL:
2107         case IP_RECVTOS:
2108 #ifdef IP_FREEBIND
2109         case IP_FREEBIND:
2110 #endif
2111         case IP_MULTICAST_TTL:
2112         case IP_MULTICAST_LOOP:
2113             val = 0;
2114             if (optlen >= sizeof(uint32_t)) {
2115                 if (get_user_u32(val, optval_addr))
2116                     return -TARGET_EFAULT;
2117             } else if (optlen >= 1) {
2118                 if (get_user_u8(val, optval_addr))
2119                     return -TARGET_EFAULT;
2120             }
2121             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2122             break;
2123         case IP_ADD_MEMBERSHIP:
2124         case IP_DROP_MEMBERSHIP:
2125         {
2126             struct ip_mreqn ip_mreq;
2127             struct target_ip_mreqn *target_smreqn;
2128 
2129             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2130                               sizeof(struct target_ip_mreq));
2131 
2132             if (optlen < sizeof (struct target_ip_mreq) ||
2133                 optlen > sizeof (struct target_ip_mreqn)) {
2134                 return -TARGET_EINVAL;
2135             }
2136 
2137             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2138             if (!target_smreqn) {
2139                 return -TARGET_EFAULT;
2140             }
2141             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2142             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2143             if (optlen == sizeof(struct target_ip_mreqn)) {
2144                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2145                 optlen = sizeof(struct ip_mreqn);
2146             }
2147             unlock_user(target_smreqn, optval_addr, 0);
2148 
2149             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2150             break;
2151         }
2152         case IP_BLOCK_SOURCE:
2153         case IP_UNBLOCK_SOURCE:
2154         case IP_ADD_SOURCE_MEMBERSHIP:
2155         case IP_DROP_SOURCE_MEMBERSHIP:
2156         {
2157             struct ip_mreq_source *ip_mreq_source;
2158 
2159             if (optlen != sizeof (struct target_ip_mreq_source))
2160                 return -TARGET_EINVAL;
2161 
2162             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2163             if (!ip_mreq_source) {
2164                 return -TARGET_EFAULT;
2165             }
2166             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2167             unlock_user (ip_mreq_source, optval_addr, 0);
2168             break;
2169         }
2170         default:
2171             goto unimplemented;
2172         }
2173         break;
2174     case SOL_IPV6:
2175         switch (optname) {
2176         case IPV6_MTU_DISCOVER:
2177         case IPV6_MTU:
2178         case IPV6_V6ONLY:
2179         case IPV6_RECVPKTINFO:
2180         case IPV6_UNICAST_HOPS:
2181         case IPV6_MULTICAST_HOPS:
2182         case IPV6_MULTICAST_LOOP:
2183         case IPV6_RECVERR:
2184         case IPV6_RECVHOPLIMIT:
2185         case IPV6_2292HOPLIMIT:
2186         case IPV6_CHECKSUM:
2187         case IPV6_ADDRFORM:
2188         case IPV6_2292PKTINFO:
2189         case IPV6_RECVTCLASS:
2190         case IPV6_RECVRTHDR:
2191         case IPV6_2292RTHDR:
2192         case IPV6_RECVHOPOPTS:
2193         case IPV6_2292HOPOPTS:
2194         case IPV6_RECVDSTOPTS:
2195         case IPV6_2292DSTOPTS:
2196         case IPV6_TCLASS:
2197         case IPV6_ADDR_PREFERENCES:
2198 #ifdef IPV6_RECVPATHMTU
2199         case IPV6_RECVPATHMTU:
2200 #endif
2201 #ifdef IPV6_TRANSPARENT
2202         case IPV6_TRANSPARENT:
2203 #endif
2204 #ifdef IPV6_FREEBIND
2205         case IPV6_FREEBIND:
2206 #endif
2207 #ifdef IPV6_RECVORIGDSTADDR
2208         case IPV6_RECVORIGDSTADDR:
2209 #endif
2210             val = 0;
2211             if (optlen < sizeof(uint32_t)) {
2212                 return -TARGET_EINVAL;
2213             }
2214             if (get_user_u32(val, optval_addr)) {
2215                 return -TARGET_EFAULT;
2216             }
2217             ret = get_errno(setsockopt(sockfd, level, optname,
2218                                        &val, sizeof(val)));
2219             break;
2220         case IPV6_PKTINFO:
2221         {
2222             struct in6_pktinfo pki;
2223 
2224             if (optlen < sizeof(pki)) {
2225                 return -TARGET_EINVAL;
2226             }
2227 
2228             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2229                 return -TARGET_EFAULT;
2230             }
2231 
2232             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2233 
2234             ret = get_errno(setsockopt(sockfd, level, optname,
2235                                        &pki, sizeof(pki)));
2236             break;
2237         }
2238         case IPV6_ADD_MEMBERSHIP:
2239         case IPV6_DROP_MEMBERSHIP:
2240         {
2241             struct ipv6_mreq ipv6mreq;
2242 
2243             if (optlen < sizeof(ipv6mreq)) {
2244                 return -TARGET_EINVAL;
2245             }
2246 
2247             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2248                 return -TARGET_EFAULT;
2249             }
2250 
2251             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2252 
2253             ret = get_errno(setsockopt(sockfd, level, optname,
2254                                        &ipv6mreq, sizeof(ipv6mreq)));
2255             break;
2256         }
2257         default:
2258             goto unimplemented;
2259         }
2260         break;
2261     case SOL_ICMPV6:
2262         switch (optname) {
2263         case ICMPV6_FILTER:
2264         {
2265             struct icmp6_filter icmp6f;
2266 
2267             if (optlen > sizeof(icmp6f)) {
2268                 optlen = sizeof(icmp6f);
2269             }
2270 
2271             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2272                 return -TARGET_EFAULT;
2273             }
2274 
2275             for (val = 0; val < 8; val++) {
2276                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2277             }
2278 
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        &icmp6f, optlen));
2281             break;
2282         }
2283         default:
2284             goto unimplemented;
2285         }
2286         break;
2287     case SOL_RAW:
2288         switch (optname) {
2289         case ICMP_FILTER:
2290         case IPV6_CHECKSUM:
2291             /* those take an u32 value */
2292             if (optlen < sizeof(uint32_t)) {
2293                 return -TARGET_EINVAL;
2294             }
2295 
2296             if (get_user_u32(val, optval_addr)) {
2297                 return -TARGET_EFAULT;
2298             }
2299             ret = get_errno(setsockopt(sockfd, level, optname,
2300                                        &val, sizeof(val)));
2301             break;
2302 
2303         default:
2304             goto unimplemented;
2305         }
2306         break;
2307 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2308     case SOL_ALG:
2309         switch (optname) {
2310         case ALG_SET_KEY:
2311         {
2312             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2313             if (!alg_key) {
2314                 return -TARGET_EFAULT;
2315             }
2316             ret = get_errno(setsockopt(sockfd, level, optname,
2317                                        alg_key, optlen));
2318             unlock_user(alg_key, optval_addr, optlen);
2319             break;
2320         }
2321         case ALG_SET_AEAD_AUTHSIZE:
2322         {
2323             ret = get_errno(setsockopt(sockfd, level, optname,
2324                                        NULL, optlen));
2325             break;
2326         }
2327         default:
2328             goto unimplemented;
2329         }
2330         break;
2331 #endif
2332     case TARGET_SOL_SOCKET:
2333         switch (optname) {
2334         case TARGET_SO_RCVTIMEO:
2335         case TARGET_SO_SNDTIMEO:
2336         {
2337                 struct timeval tv;
2338 
2339                 if (optlen != sizeof(struct target_timeval)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342 
2343                 if (copy_from_user_timeval(&tv, optval_addr)) {
2344                     return -TARGET_EFAULT;
2345                 }
2346 
2347                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2348                                 optname == TARGET_SO_RCVTIMEO ?
2349                                     SO_RCVTIMEO : SO_SNDTIMEO,
2350                                 &tv, sizeof(tv)));
2351                 return ret;
2352         }
2353         case TARGET_SO_ATTACH_FILTER:
2354         {
2355                 struct target_sock_fprog *tfprog;
2356                 struct target_sock_filter *tfilter;
2357                 struct sock_fprog fprog;
2358                 struct sock_filter *filter;
2359                 int i;
2360 
2361                 if (optlen != sizeof(*tfprog)) {
2362                     return -TARGET_EINVAL;
2363                 }
2364                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2365                     return -TARGET_EFAULT;
2366                 }
2367                 if (!lock_user_struct(VERIFY_READ, tfilter,
2368                                       tswapal(tfprog->filter), 0)) {
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_EFAULT;
2371                 }
2372 
2373                 fprog.len = tswap16(tfprog->len);
2374                 filter = g_try_new(struct sock_filter, fprog.len);
2375                 if (filter == NULL) {
2376                     unlock_user_struct(tfilter, tfprog->filter, 1);
2377                     unlock_user_struct(tfprog, optval_addr, 1);
2378                     return -TARGET_ENOMEM;
2379                 }
2380                 for (i = 0; i < fprog.len; i++) {
2381                     filter[i].code = tswap16(tfilter[i].code);
2382                     filter[i].jt = tfilter[i].jt;
2383                     filter[i].jf = tfilter[i].jf;
2384                     filter[i].k = tswap32(tfilter[i].k);
2385                 }
2386                 fprog.filter = filter;
2387 
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2389                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2390                 g_free(filter);
2391 
2392                 unlock_user_struct(tfilter, tfprog->filter, 1);
2393                 unlock_user_struct(tfprog, optval_addr, 1);
2394                 return ret;
2395         }
2396 	case TARGET_SO_BINDTODEVICE:
2397 	{
2398 		char *dev_ifname, *addr_ifname;
2399 
2400 		if (optlen > IFNAMSIZ - 1) {
2401 		    optlen = IFNAMSIZ - 1;
2402 		}
2403 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2404 		if (!dev_ifname) {
2405 		    return -TARGET_EFAULT;
2406 		}
2407 		optname = SO_BINDTODEVICE;
2408 		addr_ifname = alloca(IFNAMSIZ);
2409 		memcpy(addr_ifname, dev_ifname, optlen);
2410 		addr_ifname[optlen] = 0;
2411 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2412                                            addr_ifname, optlen));
2413 		unlock_user (dev_ifname, optval_addr, 0);
2414 		return ret;
2415 	}
2416         case TARGET_SO_LINGER:
2417         {
2418                 struct linger lg;
2419                 struct target_linger *tlg;
2420 
2421                 if (optlen != sizeof(struct target_linger)) {
2422                     return -TARGET_EINVAL;
2423                 }
2424                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2425                     return -TARGET_EFAULT;
2426                 }
2427                 __get_user(lg.l_onoff, &tlg->l_onoff);
2428                 __get_user(lg.l_linger, &tlg->l_linger);
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2430                                 &lg, sizeof(lg)));
2431                 unlock_user_struct(tlg, optval_addr, 0);
2432                 return ret;
2433         }
2434             /* Options with 'int' argument.  */
2435         case TARGET_SO_DEBUG:
2436 		optname = SO_DEBUG;
2437 		break;
2438         case TARGET_SO_REUSEADDR:
2439 		optname = SO_REUSEADDR;
2440 		break;
2441 #ifdef SO_REUSEPORT
2442         case TARGET_SO_REUSEPORT:
2443                 optname = SO_REUSEPORT;
2444                 break;
2445 #endif
2446         case TARGET_SO_TYPE:
2447 		optname = SO_TYPE;
2448 		break;
2449         case TARGET_SO_ERROR:
2450 		optname = SO_ERROR;
2451 		break;
2452         case TARGET_SO_DONTROUTE:
2453 		optname = SO_DONTROUTE;
2454 		break;
2455         case TARGET_SO_BROADCAST:
2456 		optname = SO_BROADCAST;
2457 		break;
2458         case TARGET_SO_SNDBUF:
2459 		optname = SO_SNDBUF;
2460 		break;
2461         case TARGET_SO_SNDBUFFORCE:
2462                 optname = SO_SNDBUFFORCE;
2463                 break;
2464         case TARGET_SO_RCVBUF:
2465 		optname = SO_RCVBUF;
2466 		break;
2467         case TARGET_SO_RCVBUFFORCE:
2468                 optname = SO_RCVBUFFORCE;
2469                 break;
2470         case TARGET_SO_KEEPALIVE:
2471 		optname = SO_KEEPALIVE;
2472 		break;
2473         case TARGET_SO_OOBINLINE:
2474 		optname = SO_OOBINLINE;
2475 		break;
2476         case TARGET_SO_NO_CHECK:
2477 		optname = SO_NO_CHECK;
2478 		break;
2479         case TARGET_SO_PRIORITY:
2480 		optname = SO_PRIORITY;
2481 		break;
2482 #ifdef SO_BSDCOMPAT
2483         case TARGET_SO_BSDCOMPAT:
2484 		optname = SO_BSDCOMPAT;
2485 		break;
2486 #endif
2487         case TARGET_SO_PASSCRED:
2488 		optname = SO_PASSCRED;
2489 		break;
2490         case TARGET_SO_PASSSEC:
2491                 optname = SO_PASSSEC;
2492                 break;
2493         case TARGET_SO_TIMESTAMP:
2494 		optname = SO_TIMESTAMP;
2495 		break;
2496         case TARGET_SO_RCVLOWAT:
2497 		optname = SO_RCVLOWAT;
2498 		break;
2499         default:
2500             goto unimplemented;
2501         }
2502 	if (optlen < sizeof(uint32_t))
2503             return -TARGET_EINVAL;
2504 
2505 	if (get_user_u32(val, optval_addr))
2506             return -TARGET_EFAULT;
2507 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2508         break;
2509 #ifdef SOL_NETLINK
2510     case SOL_NETLINK:
2511         switch (optname) {
2512         case NETLINK_PKTINFO:
2513         case NETLINK_ADD_MEMBERSHIP:
2514         case NETLINK_DROP_MEMBERSHIP:
2515         case NETLINK_BROADCAST_ERROR:
2516         case NETLINK_NO_ENOBUFS:
2517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2518         case NETLINK_LISTEN_ALL_NSID:
2519         case NETLINK_CAP_ACK:
2520 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2521 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2522         case NETLINK_EXT_ACK:
2523 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2525         case NETLINK_GET_STRICT_CHK:
2526 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2527             break;
2528         default:
2529             goto unimplemented;
2530         }
2531         val = 0;
2532         if (optlen < sizeof(uint32_t)) {
2533             return -TARGET_EINVAL;
2534         }
2535         if (get_user_u32(val, optval_addr)) {
2536             return -TARGET_EFAULT;
2537         }
2538         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2539                                    sizeof(val)));
2540         break;
2541 #endif /* SOL_NETLINK */
2542     default:
2543     unimplemented:
2544         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2545                       level, optname);
2546         ret = -TARGET_ENOPROTOOPT;
2547     }
2548     return ret;
2549 }
2550 
2551 /* do_getsockopt() Must return target values and target errnos. */
do_getsockopt(int sockfd,int level,int optname,abi_ulong optval_addr,abi_ulong optlen)2552 static abi_long do_getsockopt(int sockfd, int level, int optname,
2553                               abi_ulong optval_addr, abi_ulong optlen)
2554 {
2555     abi_long ret;
2556     int len, val;
2557     socklen_t lv;
2558 
2559     switch(level) {
2560     case TARGET_SOL_SOCKET:
2561         level = SOL_SOCKET;
2562         switch (optname) {
2563         /* These don't just return a single integer */
2564         case TARGET_SO_PEERNAME:
2565             goto unimplemented;
2566         case TARGET_SO_RCVTIMEO: {
2567             struct timeval tv;
2568             socklen_t tvlen;
2569 
2570             optname = SO_RCVTIMEO;
2571 
2572 get_timeout:
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             tvlen = sizeof(tv);
2581             ret = get_errno(getsockopt(sockfd, level, optname,
2582                                        &tv, &tvlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > sizeof(struct target_timeval)) {
2587                 len = sizeof(struct target_timeval);
2588             }
2589             if (copy_to_user_timeval(optval_addr, &tv)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             if (put_user_u32(len, optlen)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             break;
2596         }
2597         case TARGET_SO_SNDTIMEO:
2598             optname = SO_SNDTIMEO;
2599             goto get_timeout;
2600         case TARGET_SO_PEERCRED: {
2601             struct ucred cr;
2602             socklen_t crlen;
2603             struct target_ucred *tcr;
2604 
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611 
2612             crlen = sizeof(cr);
2613             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2614                                        &cr, &crlen));
2615             if (ret < 0) {
2616                 return ret;
2617             }
2618             if (len > crlen) {
2619                 len = crlen;
2620             }
2621             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2622                 return -TARGET_EFAULT;
2623             }
2624             __put_user(cr.pid, &tcr->pid);
2625             __put_user(cr.uid, &tcr->uid);
2626             __put_user(cr.gid, &tcr->gid);
2627             unlock_user_struct(tcr, optval_addr, 1);
2628             if (put_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             break;
2632         }
2633         case TARGET_SO_PEERSEC: {
2634             char *name;
2635 
2636             if (get_user_u32(len, optlen)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             if (len < 0) {
2640                 return -TARGET_EINVAL;
2641             }
2642             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2643             if (!name) {
2644                 return -TARGET_EFAULT;
2645             }
2646             lv = len;
2647             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2648                                        name, &lv));
2649             if (put_user_u32(lv, optlen)) {
2650                 ret = -TARGET_EFAULT;
2651             }
2652             unlock_user(name, optval_addr, lv);
2653             break;
2654         }
2655         case TARGET_SO_LINGER:
2656         {
2657             struct linger lg;
2658             socklen_t lglen;
2659             struct target_linger *tlg;
2660 
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667 
2668             lglen = sizeof(lg);
2669             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2670                                        &lg, &lglen));
2671             if (ret < 0) {
2672                 return ret;
2673             }
2674             if (len > lglen) {
2675                 len = lglen;
2676             }
2677             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             __put_user(lg.l_onoff, &tlg->l_onoff);
2681             __put_user(lg.l_linger, &tlg->l_linger);
2682             unlock_user_struct(tlg, optval_addr, 1);
2683             if (put_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             break;
2687         }
2688         /* Options with 'int' argument.  */
2689         case TARGET_SO_DEBUG:
2690             optname = SO_DEBUG;
2691             goto int_case;
2692         case TARGET_SO_REUSEADDR:
2693             optname = SO_REUSEADDR;
2694             goto int_case;
2695 #ifdef SO_REUSEPORT
2696         case TARGET_SO_REUSEPORT:
2697             optname = SO_REUSEPORT;
2698             goto int_case;
2699 #endif
2700         case TARGET_SO_TYPE:
2701             optname = SO_TYPE;
2702             goto int_case;
2703         case TARGET_SO_ERROR:
2704             optname = SO_ERROR;
2705             goto int_case;
2706         case TARGET_SO_DONTROUTE:
2707             optname = SO_DONTROUTE;
2708             goto int_case;
2709         case TARGET_SO_BROADCAST:
2710             optname = SO_BROADCAST;
2711             goto int_case;
2712         case TARGET_SO_SNDBUF:
2713             optname = SO_SNDBUF;
2714             goto int_case;
2715         case TARGET_SO_RCVBUF:
2716             optname = SO_RCVBUF;
2717             goto int_case;
2718         case TARGET_SO_KEEPALIVE:
2719             optname = SO_KEEPALIVE;
2720             goto int_case;
2721         case TARGET_SO_OOBINLINE:
2722             optname = SO_OOBINLINE;
2723             goto int_case;
2724         case TARGET_SO_NO_CHECK:
2725             optname = SO_NO_CHECK;
2726             goto int_case;
2727         case TARGET_SO_PRIORITY:
2728             optname = SO_PRIORITY;
2729             goto int_case;
2730 #ifdef SO_BSDCOMPAT
2731         case TARGET_SO_BSDCOMPAT:
2732             optname = SO_BSDCOMPAT;
2733             goto int_case;
2734 #endif
2735         case TARGET_SO_PASSCRED:
2736             optname = SO_PASSCRED;
2737             goto int_case;
2738         case TARGET_SO_TIMESTAMP:
2739             optname = SO_TIMESTAMP;
2740             goto int_case;
2741         case TARGET_SO_RCVLOWAT:
2742             optname = SO_RCVLOWAT;
2743             goto int_case;
2744         case TARGET_SO_ACCEPTCONN:
2745             optname = SO_ACCEPTCONN;
2746             goto int_case;
2747         case TARGET_SO_PROTOCOL:
2748             optname = SO_PROTOCOL;
2749             goto int_case;
2750         case TARGET_SO_DOMAIN:
2751             optname = SO_DOMAIN;
2752             goto int_case;
2753         default:
2754             goto int_case;
2755         }
2756         break;
2757     case SOL_TCP:
2758     case SOL_UDP:
2759         /* TCP and UDP options all take an 'int' value.  */
2760     int_case:
2761         if (get_user_u32(len, optlen))
2762             return -TARGET_EFAULT;
2763         if (len < 0)
2764             return -TARGET_EINVAL;
2765         lv = sizeof(lv);
2766         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2767         if (ret < 0)
2768             return ret;
2769         switch (optname) {
2770         case SO_TYPE:
2771             val = host_to_target_sock_type(val);
2772             break;
2773         case SO_ERROR:
2774             val = host_to_target_errno(val);
2775             break;
2776         }
2777         if (len > lv)
2778             len = lv;
2779         if (len == 4) {
2780             if (put_user_u32(val, optval_addr))
2781                 return -TARGET_EFAULT;
2782         } else {
2783             if (put_user_u8(val, optval_addr))
2784                 return -TARGET_EFAULT;
2785         }
2786         if (put_user_u32(len, optlen))
2787             return -TARGET_EFAULT;
2788         break;
2789     case SOL_IP:
2790         switch(optname) {
2791         case IP_TOS:
2792         case IP_TTL:
2793         case IP_HDRINCL:
2794         case IP_ROUTER_ALERT:
2795         case IP_RECVOPTS:
2796         case IP_RETOPTS:
2797         case IP_PKTINFO:
2798         case IP_MTU_DISCOVER:
2799         case IP_RECVERR:
2800         case IP_RECVTOS:
2801 #ifdef IP_FREEBIND
2802         case IP_FREEBIND:
2803 #endif
2804         case IP_MULTICAST_TTL:
2805         case IP_MULTICAST_LOOP:
2806             if (get_user_u32(len, optlen))
2807                 return -TARGET_EFAULT;
2808             if (len < 0)
2809                 return -TARGET_EINVAL;
2810             lv = sizeof(lv);
2811             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2812             if (ret < 0)
2813                 return ret;
2814             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2815                 len = 1;
2816                 if (put_user_u32(len, optlen)
2817                     || put_user_u8(val, optval_addr))
2818                     return -TARGET_EFAULT;
2819             } else {
2820                 if (len > sizeof(int))
2821                     len = sizeof(int);
2822                 if (put_user_u32(len, optlen)
2823                     || put_user_u32(val, optval_addr))
2824                     return -TARGET_EFAULT;
2825             }
2826             break;
2827         default:
2828             ret = -TARGET_ENOPROTOOPT;
2829             break;
2830         }
2831         break;
2832     case SOL_IPV6:
2833         switch (optname) {
2834         case IPV6_MTU_DISCOVER:
2835         case IPV6_MTU:
2836         case IPV6_V6ONLY:
2837         case IPV6_RECVPKTINFO:
2838         case IPV6_UNICAST_HOPS:
2839         case IPV6_MULTICAST_HOPS:
2840         case IPV6_MULTICAST_LOOP:
2841         case IPV6_RECVERR:
2842         case IPV6_RECVHOPLIMIT:
2843         case IPV6_2292HOPLIMIT:
2844         case IPV6_CHECKSUM:
2845         case IPV6_ADDRFORM:
2846         case IPV6_2292PKTINFO:
2847         case IPV6_RECVTCLASS:
2848         case IPV6_RECVRTHDR:
2849         case IPV6_2292RTHDR:
2850         case IPV6_RECVHOPOPTS:
2851         case IPV6_2292HOPOPTS:
2852         case IPV6_RECVDSTOPTS:
2853         case IPV6_2292DSTOPTS:
2854         case IPV6_TCLASS:
2855         case IPV6_ADDR_PREFERENCES:
2856 #ifdef IPV6_RECVPATHMTU
2857         case IPV6_RECVPATHMTU:
2858 #endif
2859 #ifdef IPV6_TRANSPARENT
2860         case IPV6_TRANSPARENT:
2861 #endif
2862 #ifdef IPV6_FREEBIND
2863         case IPV6_FREEBIND:
2864 #endif
2865 #ifdef IPV6_RECVORIGDSTADDR
2866         case IPV6_RECVORIGDSTADDR:
2867 #endif
2868             if (get_user_u32(len, optlen))
2869                 return -TARGET_EFAULT;
2870             if (len < 0)
2871                 return -TARGET_EINVAL;
2872             lv = sizeof(lv);
2873             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2874             if (ret < 0)
2875                 return ret;
2876             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2877                 len = 1;
2878                 if (put_user_u32(len, optlen)
2879                     || put_user_u8(val, optval_addr))
2880                     return -TARGET_EFAULT;
2881             } else {
2882                 if (len > sizeof(int))
2883                     len = sizeof(int);
2884                 if (put_user_u32(len, optlen)
2885                     || put_user_u32(val, optval_addr))
2886                     return -TARGET_EFAULT;
2887             }
2888             break;
2889         default:
2890             ret = -TARGET_ENOPROTOOPT;
2891             break;
2892         }
2893         break;
2894 #ifdef SOL_NETLINK
2895     case SOL_NETLINK:
2896         switch (optname) {
2897         case NETLINK_PKTINFO:
2898         case NETLINK_BROADCAST_ERROR:
2899         case NETLINK_NO_ENOBUFS:
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2901         case NETLINK_LISTEN_ALL_NSID:
2902         case NETLINK_CAP_ACK:
2903 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2904 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2905         case NETLINK_EXT_ACK:
2906 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2908         case NETLINK_GET_STRICT_CHK:
2909 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2910             if (get_user_u32(len, optlen)) {
2911                 return -TARGET_EFAULT;
2912             }
2913             if (len != sizeof(val)) {
2914                 return -TARGET_EINVAL;
2915             }
2916             lv = len;
2917             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2918             if (ret < 0) {
2919                 return ret;
2920             }
2921             if (put_user_u32(lv, optlen)
2922                 || put_user_u32(val, optval_addr)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             break;
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2927         case NETLINK_LIST_MEMBERSHIPS:
2928         {
2929             uint32_t *results;
2930             int i;
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len < 0) {
2935                 return -TARGET_EINVAL;
2936             }
2937             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2938             if (!results && len > 0) {
2939                 return -TARGET_EFAULT;
2940             }
2941             lv = len;
2942             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2943             if (ret < 0) {
2944                 unlock_user(results, optval_addr, 0);
2945                 return ret;
2946             }
2947             /* swap host endianness to target endianness. */
2948             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2949                 results[i] = tswap32(results[i]);
2950             }
2951             if (put_user_u32(lv, optlen)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             unlock_user(results, optval_addr, 0);
2955             break;
2956         }
2957 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2958         default:
2959             goto unimplemented;
2960         }
2961         break;
2962 #endif /* SOL_NETLINK */
2963     default:
2964     unimplemented:
2965         qemu_log_mask(LOG_UNIMP,
2966                       "getsockopt level=%d optname=%d not yet supported\n",
2967                       level, optname);
2968         ret = -TARGET_EOPNOTSUPP;
2969         break;
2970     }
2971     return ret;
2972 }
2973 
2974 /* Convert target low/high pair representing file offset into the host
2975  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2976  * as the kernel doesn't handle them either.
2977  */
target_to_host_low_high(abi_ulong tlow,abi_ulong thigh,unsigned long * hlow,unsigned long * hhigh)2978 static void target_to_host_low_high(abi_ulong tlow,
2979                                     abi_ulong thigh,
2980                                     unsigned long *hlow,
2981                                     unsigned long *hhigh)
2982 {
2983     uint64_t off = tlow |
2984         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2985         TARGET_LONG_BITS / 2;
2986 
2987     *hlow = off;
2988     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2989 }
2990 
lock_iovec(int type,abi_ulong target_addr,abi_ulong count,int copy)2991 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2992                                 abi_ulong count, int copy)
2993 {
2994     struct target_iovec *target_vec;
2995     struct iovec *vec;
2996     abi_ulong total_len, max_len;
2997     int i;
2998     int err = 0;
2999     bool bad_address = false;
3000 
3001     if (count == 0) {
3002         errno = 0;
3003         return NULL;
3004     }
3005     if (count > IOV_MAX) {
3006         errno = EINVAL;
3007         return NULL;
3008     }
3009 
3010     vec = g_try_new0(struct iovec, count);
3011     if (vec == NULL) {
3012         errno = ENOMEM;
3013         return NULL;
3014     }
3015 
3016     target_vec = lock_user(VERIFY_READ, target_addr,
3017                            count * sizeof(struct target_iovec), 1);
3018     if (target_vec == NULL) {
3019         err = EFAULT;
3020         goto fail2;
3021     }
3022 
3023     /* ??? If host page size > target page size, this will result in a
3024        value larger than what we can actually support.  */
3025     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3026     total_len = 0;
3027 
3028     for (i = 0; i < count; i++) {
3029         abi_ulong base = tswapal(target_vec[i].iov_base);
3030         abi_long len = tswapal(target_vec[i].iov_len);
3031 
3032         if (len < 0) {
3033             err = EINVAL;
3034             goto fail;
3035         } else if (len == 0) {
3036             /* Zero length pointer is ignored.  */
3037             vec[i].iov_base = 0;
3038         } else {
3039             vec[i].iov_base = lock_user(type, base, len, copy);
3040             /* If the first buffer pointer is bad, this is a fault.  But
3041              * subsequent bad buffers will result in a partial write; this
3042              * is realized by filling the vector with null pointers and
3043              * zero lengths. */
3044             if (!vec[i].iov_base) {
3045                 if (i == 0) {
3046                     err = EFAULT;
3047                     goto fail;
3048                 } else {
3049                     bad_address = true;
3050                 }
3051             }
3052             if (bad_address) {
3053                 len = 0;
3054             }
3055             if (len > max_len - total_len) {
3056                 len = max_len - total_len;
3057             }
3058         }
3059         vec[i].iov_len = len;
3060         total_len += len;
3061     }
3062 
3063     unlock_user(target_vec, target_addr, 0);
3064     return vec;
3065 
3066  fail:
3067     while (--i >= 0) {
3068         if (tswapal(target_vec[i].iov_len) > 0) {
3069             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3070         }
3071     }
3072     unlock_user(target_vec, target_addr, 0);
3073  fail2:
3074     g_free(vec);
3075     errno = err;
3076     return NULL;
3077 }
3078 
unlock_iovec(struct iovec * vec,abi_ulong target_addr,abi_ulong count,int copy)3079 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3080                          abi_ulong count, int copy)
3081 {
3082     struct target_iovec *target_vec;
3083     int i;
3084 
3085     target_vec = lock_user(VERIFY_READ, target_addr,
3086                            count * sizeof(struct target_iovec), 1);
3087     if (target_vec) {
3088         for (i = 0; i < count; i++) {
3089             abi_ulong base = tswapal(target_vec[i].iov_base);
3090             abi_long len = tswapal(target_vec[i].iov_len);
3091             if (len < 0) {
3092                 break;
3093             }
3094             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3095         }
3096         unlock_user(target_vec, target_addr, 0);
3097     }
3098 
3099     g_free(vec);
3100 }
3101 
target_to_host_sock_type(int * type)3102 static inline int target_to_host_sock_type(int *type)
3103 {
3104     int host_type = 0;
3105     int target_type = *type;
3106 
3107     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3108     case TARGET_SOCK_DGRAM:
3109         host_type = SOCK_DGRAM;
3110         break;
3111     case TARGET_SOCK_STREAM:
3112         host_type = SOCK_STREAM;
3113         break;
3114     default:
3115         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3116         break;
3117     }
3118     if (target_type & TARGET_SOCK_CLOEXEC) {
3119 #if defined(SOCK_CLOEXEC)
3120         host_type |= SOCK_CLOEXEC;
3121 #else
3122         return -TARGET_EINVAL;
3123 #endif
3124     }
3125     if (target_type & TARGET_SOCK_NONBLOCK) {
3126 #if defined(SOCK_NONBLOCK)
3127         host_type |= SOCK_NONBLOCK;
3128 #elif !defined(O_NONBLOCK)
3129         return -TARGET_EINVAL;
3130 #endif
3131     }
3132     *type = host_type;
3133     return 0;
3134 }
3135 
3136 /* Try to emulate socket type flags after socket creation.  */
sock_flags_fixup(int fd,int target_type)3137 static int sock_flags_fixup(int fd, int target_type)
3138 {
3139 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3140     if (target_type & TARGET_SOCK_NONBLOCK) {
3141         int flags = fcntl(fd, F_GETFL);
3142         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3143             close(fd);
3144             return -TARGET_EINVAL;
3145         }
3146     }
3147 #endif
3148     return fd;
3149 }
3150 
3151 /* do_socket() Must return target values and target errnos. */
do_socket(int domain,int type,int protocol)3152 static abi_long do_socket(int domain, int type, int protocol)
3153 {
3154     int target_type = type;
3155     int ret;
3156 
3157     ret = target_to_host_sock_type(&type);
3158     if (ret) {
3159         return ret;
3160     }
3161 
3162     if (domain == PF_NETLINK && !(
3163 #ifdef CONFIG_RTNETLINK
3164          protocol == NETLINK_ROUTE ||
3165 #endif
3166          protocol == NETLINK_KOBJECT_UEVENT ||
3167          protocol == NETLINK_AUDIT)) {
3168         return -TARGET_EPROTONOSUPPORT;
3169     }
3170 
3171     if (domain == AF_PACKET ||
3172         (domain == AF_INET && type == SOCK_PACKET)) {
3173         protocol = tswap16(protocol);
3174     }
3175 
3176     ret = get_errno(socket(domain, type, protocol));
3177     if (ret >= 0) {
3178         ret = sock_flags_fixup(ret, target_type);
3179         if (type == SOCK_PACKET) {
3180             /* Manage an obsolete case :
3181              * if socket type is SOCK_PACKET, bind by name
3182              */
3183             fd_trans_register(ret, &target_packet_trans);
3184         } else if (domain == PF_NETLINK) {
3185             switch (protocol) {
3186 #ifdef CONFIG_RTNETLINK
3187             case NETLINK_ROUTE:
3188                 fd_trans_register(ret, &target_netlink_route_trans);
3189                 break;
3190 #endif
3191             case NETLINK_KOBJECT_UEVENT:
3192                 /* nothing to do: messages are strings */
3193                 break;
3194             case NETLINK_AUDIT:
3195                 fd_trans_register(ret, &target_netlink_audit_trans);
3196                 break;
3197             default:
3198                 g_assert_not_reached();
3199             }
3200         }
3201     }
3202     return ret;
3203 }
3204 
3205 /* do_bind() Must return target values and target errnos. */
do_bind(int sockfd,abi_ulong target_addr,socklen_t addrlen)3206 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3207                         socklen_t addrlen)
3208 {
3209     void *addr;
3210     abi_long ret;
3211 
3212     if ((int)addrlen < 0) {
3213         return -TARGET_EINVAL;
3214     }
3215 
3216     addr = alloca(addrlen+1);
3217 
3218     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3219     if (ret)
3220         return ret;
3221 
3222     return get_errno(bind(sockfd, addr, addrlen));
3223 }
3224 
3225 /* do_connect() Must return target values and target errnos. */
do_connect(int sockfd,abi_ulong target_addr,socklen_t addrlen)3226 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3227                            socklen_t addrlen)
3228 {
3229     void *addr;
3230     abi_long ret;
3231 
3232     if ((int)addrlen < 0) {
3233         return -TARGET_EINVAL;
3234     }
3235 
3236     addr = alloca(addrlen+1);
3237 
3238     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3239     if (ret)
3240         return ret;
3241 
3242     return get_errno(safe_connect(sockfd, addr, addrlen));
3243 }
3244 
3245 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
do_sendrecvmsg_locked(int fd,struct target_msghdr * msgp,int flags,int send)3246 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3247                                       int flags, int send)
3248 {
3249     abi_long ret, len;
3250     struct msghdr msg;
3251     abi_ulong count;
3252     struct iovec *vec;
3253     abi_ulong target_vec;
3254 
3255     if (msgp->msg_name) {
3256         msg.msg_namelen = tswap32(msgp->msg_namelen);
3257         msg.msg_name = alloca(msg.msg_namelen+1);
3258         ret = target_to_host_sockaddr(fd, msg.msg_name,
3259                                       tswapal(msgp->msg_name),
3260                                       msg.msg_namelen);
3261         if (ret == -TARGET_EFAULT) {
3262             /* For connected sockets msg_name and msg_namelen must
3263              * be ignored, so returning EFAULT immediately is wrong.
3264              * Instead, pass a bad msg_name to the host kernel, and
3265              * let it decide whether to return EFAULT or not.
3266              */
3267             msg.msg_name = (void *)-1;
3268         } else if (ret) {
3269             goto out2;
3270         }
3271     } else {
3272         msg.msg_name = NULL;
3273         msg.msg_namelen = 0;
3274     }
3275     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3276     msg.msg_control = alloca(msg.msg_controllen);
3277     memset(msg.msg_control, 0, msg.msg_controllen);
3278 
3279     msg.msg_flags = tswap32(msgp->msg_flags);
3280 
3281     count = tswapal(msgp->msg_iovlen);
3282     target_vec = tswapal(msgp->msg_iov);
3283 
3284     if (count > IOV_MAX) {
3285         /* sendrcvmsg returns a different errno for this condition than
3286          * readv/writev, so we must catch it here before lock_iovec() does.
3287          */
3288         ret = -TARGET_EMSGSIZE;
3289         goto out2;
3290     }
3291 
3292     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3293                      target_vec, count, send);
3294     if (vec == NULL) {
3295         ret = -host_to_target_errno(errno);
3296         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3297         if (!send || ret) {
3298             goto out2;
3299         }
3300     }
3301     msg.msg_iovlen = count;
3302     msg.msg_iov = vec;
3303 
3304     if (send) {
3305         if (fd_trans_target_to_host_data(fd)) {
3306             void *host_msg;
3307 
3308             host_msg = g_malloc(msg.msg_iov->iov_len);
3309             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3310             ret = fd_trans_target_to_host_data(fd)(host_msg,
3311                                                    msg.msg_iov->iov_len);
3312             if (ret >= 0) {
3313                 msg.msg_iov->iov_base = host_msg;
3314                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3315             }
3316             g_free(host_msg);
3317         } else {
3318             ret = target_to_host_cmsg(&msg, msgp);
3319             if (ret == 0) {
3320                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3321             }
3322         }
3323     } else {
3324         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3325         if (!is_error(ret)) {
3326             len = ret;
3327             if (fd_trans_host_to_target_data(fd)) {
3328                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3329                                                MIN(msg.msg_iov->iov_len, len));
3330             }
3331             if (!is_error(ret)) {
3332                 ret = host_to_target_cmsg(msgp, &msg);
3333             }
3334             if (!is_error(ret)) {
3335                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3336                 msgp->msg_flags = tswap32(msg.msg_flags);
3337                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3338                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3339                                     msg.msg_name, msg.msg_namelen);
3340                     if (ret) {
3341                         goto out;
3342                     }
3343                 }
3344 
3345                 ret = len;
3346             }
3347         }
3348     }
3349 
3350 out:
3351     if (vec) {
3352         unlock_iovec(vec, target_vec, count, !send);
3353     }
3354 out2:
3355     return ret;
3356 }
3357 
do_sendrecvmsg(int fd,abi_ulong target_msg,int flags,int send)3358 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3359                                int flags, int send)
3360 {
3361     abi_long ret;
3362     struct target_msghdr *msgp;
3363 
3364     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3365                           msgp,
3366                           target_msg,
3367                           send ? 1 : 0)) {
3368         return -TARGET_EFAULT;
3369     }
3370     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3371     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3372     return ret;
3373 }
3374 
3375 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3376  * so it might not have this *mmsg-specific flag either.
3377  */
3378 #ifndef MSG_WAITFORONE
3379 #define MSG_WAITFORONE 0x10000
3380 #endif
3381 
do_sendrecvmmsg(int fd,abi_ulong target_msgvec,unsigned int vlen,unsigned int flags,int send)3382 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3383                                 unsigned int vlen, unsigned int flags,
3384                                 int send)
3385 {
3386     struct target_mmsghdr *mmsgp;
3387     abi_long ret = 0;
3388     int i;
3389 
3390     if (vlen > UIO_MAXIOV) {
3391         vlen = UIO_MAXIOV;
3392     }
3393 
3394     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3395     if (!mmsgp) {
3396         return -TARGET_EFAULT;
3397     }
3398 
3399     for (i = 0; i < vlen; i++) {
3400         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3401         if (is_error(ret)) {
3402             break;
3403         }
3404         mmsgp[i].msg_len = tswap32(ret);
3405         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3406         if (flags & MSG_WAITFORONE) {
3407             flags |= MSG_DONTWAIT;
3408         }
3409     }
3410 
3411     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3412 
3413     /* Return number of datagrams sent if we sent any at all;
3414      * otherwise return the error.
3415      */
3416     if (i) {
3417         return i;
3418     }
3419     return ret;
3420 }
3421 
3422 /* do_accept4() Must return target values and target errnos. */
do_accept4(int fd,abi_ulong target_addr,abi_ulong target_addrlen_addr,int flags)3423 static abi_long do_accept4(int fd, abi_ulong target_addr,
3424                            abi_ulong target_addrlen_addr, int flags)
3425 {
3426     socklen_t addrlen, ret_addrlen;
3427     void *addr;
3428     abi_long ret;
3429     int host_flags;
3430 
3431     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3432         return -TARGET_EINVAL;
3433     }
3434 
3435     host_flags = 0;
3436     if (flags & TARGET_SOCK_NONBLOCK) {
3437         host_flags |= SOCK_NONBLOCK;
3438     }
3439     if (flags & TARGET_SOCK_CLOEXEC) {
3440         host_flags |= SOCK_CLOEXEC;
3441     }
3442 
3443     if (target_addr == 0) {
3444         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3445     }
3446 
3447     /* linux returns EFAULT if addrlen pointer is invalid */
3448     if (get_user_u32(addrlen, target_addrlen_addr))
3449         return -TARGET_EFAULT;
3450 
3451     if ((int)addrlen < 0) {
3452         return -TARGET_EINVAL;
3453     }
3454 
3455     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3456         return -TARGET_EFAULT;
3457     }
3458 
3459     addr = alloca(addrlen);
3460 
3461     ret_addrlen = addrlen;
3462     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3463     if (!is_error(ret)) {
3464         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3465         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3466             ret = -TARGET_EFAULT;
3467         }
3468     }
3469     return ret;
3470 }
3471 
3472 /* do_getpeername() Must return target values and target errnos. */
do_getpeername(int fd,abi_ulong target_addr,abi_ulong target_addrlen_addr)3473 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3474                                abi_ulong target_addrlen_addr)
3475 {
3476     socklen_t addrlen, ret_addrlen;
3477     void *addr;
3478     abi_long ret;
3479 
3480     if (get_user_u32(addrlen, target_addrlen_addr))
3481         return -TARGET_EFAULT;
3482 
3483     if ((int)addrlen < 0) {
3484         return -TARGET_EINVAL;
3485     }
3486 
3487     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3488         return -TARGET_EFAULT;
3489     }
3490 
3491     addr = alloca(addrlen);
3492 
3493     ret_addrlen = addrlen;
3494     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3495     if (!is_error(ret)) {
3496         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3497         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3498             ret = -TARGET_EFAULT;
3499         }
3500     }
3501     return ret;
3502 }
3503 
3504 /* do_getsockname() Must return target values and target errnos. */
do_getsockname(int fd,abi_ulong target_addr,abi_ulong target_addrlen_addr)3505 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3506                                abi_ulong target_addrlen_addr)
3507 {
3508     socklen_t addrlen, ret_addrlen;
3509     void *addr;
3510     abi_long ret;
3511 
3512     if (get_user_u32(addrlen, target_addrlen_addr))
3513         return -TARGET_EFAULT;
3514 
3515     if ((int)addrlen < 0) {
3516         return -TARGET_EINVAL;
3517     }
3518 
3519     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3520         return -TARGET_EFAULT;
3521     }
3522 
3523     addr = alloca(addrlen);
3524 
3525     ret_addrlen = addrlen;
3526     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3527     if (!is_error(ret)) {
3528         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3529         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3530             ret = -TARGET_EFAULT;
3531         }
3532     }
3533     return ret;
3534 }
3535 
3536 /* do_socketpair() Must return target values and target errnos. */
do_socketpair(int domain,int type,int protocol,abi_ulong target_tab_addr)3537 static abi_long do_socketpair(int domain, int type, int protocol,
3538                               abi_ulong target_tab_addr)
3539 {
3540     int tab[2];
3541     abi_long ret;
3542 
3543     target_to_host_sock_type(&type);
3544 
3545     ret = get_errno(socketpair(domain, type, protocol, tab));
3546     if (!is_error(ret)) {
3547         if (put_user_s32(tab[0], target_tab_addr)
3548             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3549             ret = -TARGET_EFAULT;
3550     }
3551     return ret;
3552 }
3553 
3554 /* do_sendto() Must return target values and target errnos. */
do_sendto(int fd,abi_ulong msg,size_t len,int flags,abi_ulong target_addr,socklen_t addrlen)3555 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3556                           abi_ulong target_addr, socklen_t addrlen)
3557 {
3558     void *addr;
3559     void *host_msg;
3560     void *copy_msg = NULL;
3561     abi_long ret;
3562 
3563     if ((int)addrlen < 0) {
3564         return -TARGET_EINVAL;
3565     }
3566 
3567     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3568     if (!host_msg)
3569         return -TARGET_EFAULT;
3570     if (fd_trans_target_to_host_data(fd)) {
3571         copy_msg = host_msg;
3572         host_msg = g_malloc(len);
3573         memcpy(host_msg, copy_msg, len);
3574         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3575         if (ret < 0) {
3576             goto fail;
3577         }
3578     }
3579     if (target_addr) {
3580         addr = alloca(addrlen+1);
3581         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3582         if (ret) {
3583             goto fail;
3584         }
3585         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3586     } else {
3587         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3588     }
3589 fail:
3590     if (copy_msg) {
3591         g_free(host_msg);
3592         host_msg = copy_msg;
3593     }
3594     unlock_user(host_msg, msg, 0);
3595     return ret;
3596 }
3597 
3598 /* do_recvfrom() Must return target values and target errnos. */
do_recvfrom(int fd,abi_ulong msg,size_t len,int flags,abi_ulong target_addr,abi_ulong target_addrlen)3599 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3600                             abi_ulong target_addr,
3601                             abi_ulong target_addrlen)
3602 {
3603     socklen_t addrlen, ret_addrlen;
3604     void *addr;
3605     void *host_msg;
3606     abi_long ret;
3607 
3608     if (!msg) {
3609         host_msg = NULL;
3610     } else {
3611         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3612         if (!host_msg) {
3613             return -TARGET_EFAULT;
3614         }
3615     }
3616     if (target_addr) {
3617         if (get_user_u32(addrlen, target_addrlen)) {
3618             ret = -TARGET_EFAULT;
3619             goto fail;
3620         }
3621         if ((int)addrlen < 0) {
3622             ret = -TARGET_EINVAL;
3623             goto fail;
3624         }
3625         addr = alloca(addrlen);
3626         ret_addrlen = addrlen;
3627         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3628                                       addr, &ret_addrlen));
3629     } else {
3630         addr = NULL; /* To keep compiler quiet.  */
3631         addrlen = 0; /* To keep compiler quiet.  */
3632         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3633     }
3634     if (!is_error(ret)) {
3635         if (fd_trans_host_to_target_data(fd)) {
3636             abi_long trans;
3637             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3638             if (is_error(trans)) {
3639                 ret = trans;
3640                 goto fail;
3641             }
3642         }
3643         if (target_addr) {
3644             host_to_target_sockaddr(target_addr, addr,
3645                                     MIN(addrlen, ret_addrlen));
3646             if (put_user_u32(ret_addrlen, target_addrlen)) {
3647                 ret = -TARGET_EFAULT;
3648                 goto fail;
3649             }
3650         }
3651         unlock_user(host_msg, msg, len);
3652     } else {
3653 fail:
3654         unlock_user(host_msg, msg, 0);
3655     }
3656     return ret;
3657 }
3658 
3659 #ifdef TARGET_NR_socketcall
3660 /* do_socketcall() must return target values and target errnos. */
do_socketcall(int num,abi_ulong vptr)3661 static abi_long do_socketcall(int num, abi_ulong vptr)
3662 {
3663     static const unsigned nargs[] = { /* number of arguments per operation */
3664         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3665         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3666         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3667         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3668         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3669         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3670         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3671         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3672         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3673         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3674         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3675         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3676         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3677         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3678         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3679         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3680         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3681         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3682         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3683         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3684     };
3685     abi_long a[6]; /* max 6 args */
3686     unsigned i;
3687 
3688     /* check the range of the first argument num */
3689     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3690     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3691         return -TARGET_EINVAL;
3692     }
3693     /* ensure we have space for args */
3694     if (nargs[num] > ARRAY_SIZE(a)) {
3695         return -TARGET_EINVAL;
3696     }
3697     /* collect the arguments in a[] according to nargs[] */
3698     for (i = 0; i < nargs[num]; ++i) {
3699         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3700             return -TARGET_EFAULT;
3701         }
3702     }
3703     /* now when we have the args, invoke the appropriate underlying function */
3704     switch (num) {
3705     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3706         return do_socket(a[0], a[1], a[2]);
3707     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3708         return do_bind(a[0], a[1], a[2]);
3709     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3710         return do_connect(a[0], a[1], a[2]);
3711     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3712         return get_errno(listen(a[0], a[1]));
3713     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3714         return do_accept4(a[0], a[1], a[2], 0);
3715     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3716         return do_getsockname(a[0], a[1], a[2]);
3717     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3718         return do_getpeername(a[0], a[1], a[2]);
3719     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3720         return do_socketpair(a[0], a[1], a[2], a[3]);
3721     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3722         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3723     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3724         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3725     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3726         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3727     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3728         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3729     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3730         return get_errno(shutdown(a[0], a[1]));
3731     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3732         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3733     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3734         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3735     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3736         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3737     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3738         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3739     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3740         return do_accept4(a[0], a[1], a[2], a[3]);
3741     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3742         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3743     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3744         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3745     default:
3746         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3747         return -TARGET_EINVAL;
3748     }
3749 }
3750 #endif
3751 
3752 #ifndef TARGET_SEMID64_DS
3753 /* asm-generic version of this struct */
3754 struct target_semid64_ds
3755 {
3756   struct target_ipc_perm sem_perm;
3757   abi_ulong sem_otime;
3758 #if TARGET_ABI_BITS == 32
3759   abi_ulong __unused1;
3760 #endif
3761   abi_ulong sem_ctime;
3762 #if TARGET_ABI_BITS == 32
3763   abi_ulong __unused2;
3764 #endif
3765   abi_ulong sem_nsems;
3766   abi_ulong __unused3;
3767   abi_ulong __unused4;
3768 };
3769 #endif
3770 
target_to_host_ipc_perm(struct ipc_perm * host_ip,abi_ulong target_addr)3771 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3772                                                abi_ulong target_addr)
3773 {
3774     struct target_ipc_perm *target_ip;
3775     struct target_semid64_ds *target_sd;
3776 
3777     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3778         return -TARGET_EFAULT;
3779     target_ip = &(target_sd->sem_perm);
3780     host_ip->__key = tswap32(target_ip->__key);
3781     host_ip->uid = tswap32(target_ip->uid);
3782     host_ip->gid = tswap32(target_ip->gid);
3783     host_ip->cuid = tswap32(target_ip->cuid);
3784     host_ip->cgid = tswap32(target_ip->cgid);
3785 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3786     host_ip->mode = tswap32(target_ip->mode);
3787 #else
3788     host_ip->mode = tswap16(target_ip->mode);
3789 #endif
3790 #if defined(TARGET_PPC)
3791     host_ip->__seq = tswap32(target_ip->__seq);
3792 #else
3793     host_ip->__seq = tswap16(target_ip->__seq);
3794 #endif
3795     unlock_user_struct(target_sd, target_addr, 0);
3796     return 0;
3797 }
3798 
host_to_target_ipc_perm(abi_ulong target_addr,struct ipc_perm * host_ip)3799 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3800                                                struct ipc_perm *host_ip)
3801 {
3802     struct target_ipc_perm *target_ip;
3803     struct target_semid64_ds *target_sd;
3804 
3805     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3806         return -TARGET_EFAULT;
3807     target_ip = &(target_sd->sem_perm);
3808     target_ip->__key = tswap32(host_ip->__key);
3809     target_ip->uid = tswap32(host_ip->uid);
3810     target_ip->gid = tswap32(host_ip->gid);
3811     target_ip->cuid = tswap32(host_ip->cuid);
3812     target_ip->cgid = tswap32(host_ip->cgid);
3813 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3814     target_ip->mode = tswap32(host_ip->mode);
3815 #else
3816     target_ip->mode = tswap16(host_ip->mode);
3817 #endif
3818 #if defined(TARGET_PPC)
3819     target_ip->__seq = tswap32(host_ip->__seq);
3820 #else
3821     target_ip->__seq = tswap16(host_ip->__seq);
3822 #endif
3823     unlock_user_struct(target_sd, target_addr, 1);
3824     return 0;
3825 }
3826 
target_to_host_semid_ds(struct semid_ds * host_sd,abi_ulong target_addr)3827 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3828                                                abi_ulong target_addr)
3829 {
3830     struct target_semid64_ds *target_sd;
3831 
3832     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3833         return -TARGET_EFAULT;
3834     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3835         return -TARGET_EFAULT;
3836     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3837     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3838     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3839     unlock_user_struct(target_sd, target_addr, 0);
3840     return 0;
3841 }
3842 
host_to_target_semid_ds(abi_ulong target_addr,struct semid_ds * host_sd)3843 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3844                                                struct semid_ds *host_sd)
3845 {
3846     struct target_semid64_ds *target_sd;
3847 
3848     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3849         return -TARGET_EFAULT;
3850     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3851         return -TARGET_EFAULT;
3852     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3853     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3854     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3855     unlock_user_struct(target_sd, target_addr, 1);
3856     return 0;
3857 }
3858 
3859 struct target_seminfo {
3860     int semmap;
3861     int semmni;
3862     int semmns;
3863     int semmnu;
3864     int semmsl;
3865     int semopm;
3866     int semume;
3867     int semusz;
3868     int semvmx;
3869     int semaem;
3870 };
3871 
host_to_target_seminfo(abi_ulong target_addr,struct seminfo * host_seminfo)3872 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3873                                               struct seminfo *host_seminfo)
3874 {
3875     struct target_seminfo *target_seminfo;
3876     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3877         return -TARGET_EFAULT;
3878     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3879     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3880     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3881     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3882     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3883     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3884     __put_user(host_seminfo->semume, &target_seminfo->semume);
3885     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3886     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3887     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3888     unlock_user_struct(target_seminfo, target_addr, 1);
3889     return 0;
3890 }
3891 
3892 union semun {
3893 	int val;
3894 	struct semid_ds *buf;
3895 	unsigned short *array;
3896 	struct seminfo *__buf;
3897 };
3898 
3899 union target_semun {
3900 	int val;
3901 	abi_ulong buf;
3902 	abi_ulong array;
3903 	abi_ulong __buf;
3904 };
3905 
target_to_host_semarray(int semid,unsigned short ** host_array,abi_ulong target_addr)3906 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3907                                                abi_ulong target_addr)
3908 {
3909     int nsems;
3910     unsigned short *array;
3911     union semun semun;
3912     struct semid_ds semid_ds;
3913     int i, ret;
3914 
3915     semun.buf = &semid_ds;
3916 
3917     ret = semctl(semid, 0, IPC_STAT, semun);
3918     if (ret == -1)
3919         return get_errno(ret);
3920 
3921     nsems = semid_ds.sem_nsems;
3922 
3923     *host_array = g_try_new(unsigned short, nsems);
3924     if (!*host_array) {
3925         return -TARGET_ENOMEM;
3926     }
3927     array = lock_user(VERIFY_READ, target_addr,
3928                       nsems*sizeof(unsigned short), 1);
3929     if (!array) {
3930         g_free(*host_array);
3931         return -TARGET_EFAULT;
3932     }
3933 
3934     for(i=0; i<nsems; i++) {
3935         __get_user((*host_array)[i], &array[i]);
3936     }
3937     unlock_user(array, target_addr, 0);
3938 
3939     return 0;
3940 }
3941 
host_to_target_semarray(int semid,abi_ulong target_addr,unsigned short ** host_array)3942 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3943                                                unsigned short **host_array)
3944 {
3945     int nsems;
3946     unsigned short *array;
3947     union semun semun;
3948     struct semid_ds semid_ds;
3949     int i, ret;
3950 
3951     semun.buf = &semid_ds;
3952 
3953     ret = semctl(semid, 0, IPC_STAT, semun);
3954     if (ret == -1)
3955         return get_errno(ret);
3956 
3957     nsems = semid_ds.sem_nsems;
3958 
3959     array = lock_user(VERIFY_WRITE, target_addr,
3960                       nsems*sizeof(unsigned short), 0);
3961     if (!array)
3962         return -TARGET_EFAULT;
3963 
3964     for(i=0; i<nsems; i++) {
3965         __put_user((*host_array)[i], &array[i]);
3966     }
3967     g_free(*host_array);
3968     unlock_user(array, target_addr, 1);
3969 
3970     return 0;
3971 }
3972 
do_semctl(int semid,int semnum,int cmd,abi_ulong target_arg)3973 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3974                                  abi_ulong target_arg)
3975 {
3976     union target_semun target_su = { .buf = target_arg };
3977     union semun arg;
3978     struct semid_ds dsarg;
3979     unsigned short *array = NULL;
3980     struct seminfo seminfo;
3981     abi_long ret = -TARGET_EINVAL;
3982     abi_long err;
3983     cmd &= 0xff;
3984 
3985     switch( cmd ) {
3986 	case GETVAL:
3987 	case SETVAL:
3988             /* In 64 bit cross-endian situations, we will erroneously pick up
3989              * the wrong half of the union for the "val" element.  To rectify
3990              * this, the entire 8-byte structure is byteswapped, followed by
3991 	     * a swap of the 4 byte val field. In other cases, the data is
3992 	     * already in proper host byte order. */
3993 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3994 		target_su.buf = tswapal(target_su.buf);
3995 		arg.val = tswap32(target_su.val);
3996 	    } else {
3997 		arg.val = target_su.val;
3998 	    }
3999             ret = get_errno(semctl(semid, semnum, cmd, arg));
4000             break;
4001 	case GETALL:
4002 	case SETALL:
4003             err = target_to_host_semarray(semid, &array, target_su.array);
4004             if (err)
4005                 return err;
4006             arg.array = array;
4007             ret = get_errno(semctl(semid, semnum, cmd, arg));
4008             err = host_to_target_semarray(semid, target_su.array, &array);
4009             if (err)
4010                 return err;
4011             break;
4012 	case IPC_STAT:
4013 	case IPC_SET:
4014 	case SEM_STAT:
4015             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4016             if (err)
4017                 return err;
4018             arg.buf = &dsarg;
4019             ret = get_errno(semctl(semid, semnum, cmd, arg));
4020             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4021             if (err)
4022                 return err;
4023             break;
4024 	case IPC_INFO:
4025 	case SEM_INFO:
4026             arg.__buf = &seminfo;
4027             ret = get_errno(semctl(semid, semnum, cmd, arg));
4028             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4029             if (err)
4030                 return err;
4031             break;
4032 	case IPC_RMID:
4033 	case GETPID:
4034 	case GETNCNT:
4035 	case GETZCNT:
4036             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4037             break;
4038     }
4039 
4040     return ret;
4041 }
4042 
4043 struct target_sembuf {
4044     unsigned short sem_num;
4045     short sem_op;
4046     short sem_flg;
4047 };
4048 
target_to_host_sembuf(struct sembuf * host_sembuf,abi_ulong target_addr,unsigned nsops)4049 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4050                                              abi_ulong target_addr,
4051                                              unsigned nsops)
4052 {
4053     struct target_sembuf *target_sembuf;
4054     int i;
4055 
4056     target_sembuf = lock_user(VERIFY_READ, target_addr,
4057                               nsops*sizeof(struct target_sembuf), 1);
4058     if (!target_sembuf)
4059         return -TARGET_EFAULT;
4060 
4061     for(i=0; i<nsops; i++) {
4062         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4063         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4064         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4065     }
4066 
4067     unlock_user(target_sembuf, target_addr, 0);
4068 
4069     return 0;
4070 }
4071 
4072 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4073     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4074 
4075 /*
4076  * This macro is required to handle the s390 variants, which passes the
4077  * arguments in a different order than default.
4078  */
4079 #ifdef __s390x__
4080 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4081   (__nsops), (__timeout), (__sops)
4082 #else
4083 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4084   (__nsops), 0, (__sops), (__timeout)
4085 #endif
4086 
do_semtimedop(int semid,abi_long ptr,unsigned nsops,abi_long timeout,bool time64)4087 static inline abi_long do_semtimedop(int semid,
4088                                      abi_long ptr,
4089                                      unsigned nsops,
4090                                      abi_long timeout, bool time64)
4091 {
4092     struct sembuf *sops;
4093     struct timespec ts, *pts = NULL;
4094     abi_long ret;
4095 
4096     if (timeout) {
4097         pts = &ts;
4098         if (time64) {
4099             if (target_to_host_timespec64(pts, timeout)) {
4100                 return -TARGET_EFAULT;
4101             }
4102         } else {
4103             if (target_to_host_timespec(pts, timeout)) {
4104                 return -TARGET_EFAULT;
4105             }
4106         }
4107     }
4108 
4109     if (nsops > TARGET_SEMOPM) {
4110         return -TARGET_E2BIG;
4111     }
4112 
4113     sops = g_new(struct sembuf, nsops);
4114 
4115     if (target_to_host_sembuf(sops, ptr, nsops)) {
4116         g_free(sops);
4117         return -TARGET_EFAULT;
4118     }
4119 
4120     ret = -TARGET_ENOSYS;
4121 #ifdef __NR_semtimedop
4122     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4123 #endif
4124 #ifdef __NR_ipc
4125     if (ret == -TARGET_ENOSYS) {
4126         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4127                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4128     }
4129 #endif
4130     g_free(sops);
4131     return ret;
4132 }
4133 #endif
4134 
4135 struct target_msqid_ds
4136 {
4137     struct target_ipc_perm msg_perm;
4138     abi_ulong msg_stime;
4139 #if TARGET_ABI_BITS == 32
4140     abi_ulong __unused1;
4141 #endif
4142     abi_ulong msg_rtime;
4143 #if TARGET_ABI_BITS == 32
4144     abi_ulong __unused2;
4145 #endif
4146     abi_ulong msg_ctime;
4147 #if TARGET_ABI_BITS == 32
4148     abi_ulong __unused3;
4149 #endif
4150     abi_ulong __msg_cbytes;
4151     abi_ulong msg_qnum;
4152     abi_ulong msg_qbytes;
4153     abi_ulong msg_lspid;
4154     abi_ulong msg_lrpid;
4155     abi_ulong __unused4;
4156     abi_ulong __unused5;
4157 };
4158 
target_to_host_msqid_ds(struct msqid_ds * host_md,abi_ulong target_addr)4159 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4160                                                abi_ulong target_addr)
4161 {
4162     struct target_msqid_ds *target_md;
4163 
4164     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4165         return -TARGET_EFAULT;
4166     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4167         return -TARGET_EFAULT;
4168     host_md->msg_stime = tswapal(target_md->msg_stime);
4169     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4170     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4171     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4172     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4173     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4174     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4175     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4176     unlock_user_struct(target_md, target_addr, 0);
4177     return 0;
4178 }
4179 
host_to_target_msqid_ds(abi_ulong target_addr,struct msqid_ds * host_md)4180 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4181                                                struct msqid_ds *host_md)
4182 {
4183     struct target_msqid_ds *target_md;
4184 
4185     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4186         return -TARGET_EFAULT;
4187     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4188         return -TARGET_EFAULT;
4189     target_md->msg_stime = tswapal(host_md->msg_stime);
4190     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4191     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4192     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4193     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4194     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4195     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4196     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4197     unlock_user_struct(target_md, target_addr, 1);
4198     return 0;
4199 }
4200 
4201 struct target_msginfo {
4202     int msgpool;
4203     int msgmap;
4204     int msgmax;
4205     int msgmnb;
4206     int msgmni;
4207     int msgssz;
4208     int msgtql;
4209     unsigned short int msgseg;
4210 };
4211 
host_to_target_msginfo(abi_ulong target_addr,struct msginfo * host_msginfo)4212 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4213                                               struct msginfo *host_msginfo)
4214 {
4215     struct target_msginfo *target_msginfo;
4216     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4217         return -TARGET_EFAULT;
4218     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4219     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4220     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4221     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4222     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4223     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4224     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4225     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4226     unlock_user_struct(target_msginfo, target_addr, 1);
4227     return 0;
4228 }
4229 
do_msgctl(int msgid,int cmd,abi_long ptr)4230 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4231 {
4232     struct msqid_ds dsarg;
4233     struct msginfo msginfo;
4234     abi_long ret = -TARGET_EINVAL;
4235 
4236     cmd &= 0xff;
4237 
4238     switch (cmd) {
4239     case IPC_STAT:
4240     case IPC_SET:
4241     case MSG_STAT:
4242         if (target_to_host_msqid_ds(&dsarg,ptr))
4243             return -TARGET_EFAULT;
4244         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4245         if (host_to_target_msqid_ds(ptr,&dsarg))
4246             return -TARGET_EFAULT;
4247         break;
4248     case IPC_RMID:
4249         ret = get_errno(msgctl(msgid, cmd, NULL));
4250         break;
4251     case IPC_INFO:
4252     case MSG_INFO:
4253         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4254         if (host_to_target_msginfo(ptr, &msginfo))
4255             return -TARGET_EFAULT;
4256         break;
4257     }
4258 
4259     return ret;
4260 }
4261 
4262 struct target_msgbuf {
4263     abi_long mtype;
4264     char	mtext[1];
4265 };
4266 
do_msgsnd(int msqid,abi_long msgp,ssize_t msgsz,int msgflg)4267 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4268                                  ssize_t msgsz, int msgflg)
4269 {
4270     struct target_msgbuf *target_mb;
4271     struct msgbuf *host_mb;
4272     abi_long ret = 0;
4273 
4274     if (msgsz < 0) {
4275         return -TARGET_EINVAL;
4276     }
4277 
4278     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4279         return -TARGET_EFAULT;
4280     host_mb = g_try_malloc(msgsz + sizeof(long));
4281     if (!host_mb) {
4282         unlock_user_struct(target_mb, msgp, 0);
4283         return -TARGET_ENOMEM;
4284     }
4285     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4286     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4287     ret = -TARGET_ENOSYS;
4288 #ifdef __NR_msgsnd
4289     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4290 #endif
4291 #ifdef __NR_ipc
4292     if (ret == -TARGET_ENOSYS) {
4293 #ifdef __s390x__
4294         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4295                                  host_mb));
4296 #else
4297         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4298                                  host_mb, 0));
4299 #endif
4300     }
4301 #endif
4302     g_free(host_mb);
4303     unlock_user_struct(target_mb, msgp, 0);
4304 
4305     return ret;
4306 }
4307 
4308 #ifdef __NR_ipc
4309 #if defined(__sparc__)
4310 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4311 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4312 #elif defined(__s390x__)
4313 /* The s390 sys_ipc variant has only five parameters.  */
4314 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4315     ((long int[]){(long int)__msgp, __msgtyp})
4316 #else
4317 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4318     ((long int[]){(long int)__msgp, __msgtyp}), 0
4319 #endif
4320 #endif
4321 
do_msgrcv(int msqid,abi_long msgp,ssize_t msgsz,abi_long msgtyp,int msgflg)4322 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4323                                  ssize_t msgsz, abi_long msgtyp,
4324                                  int msgflg)
4325 {
4326     struct target_msgbuf *target_mb;
4327     char *target_mtext;
4328     struct msgbuf *host_mb;
4329     abi_long ret = 0;
4330 
4331     if (msgsz < 0) {
4332         return -TARGET_EINVAL;
4333     }
4334 
4335     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4336         return -TARGET_EFAULT;
4337 
4338     host_mb = g_try_malloc(msgsz + sizeof(long));
4339     if (!host_mb) {
4340         ret = -TARGET_ENOMEM;
4341         goto end;
4342     }
4343     ret = -TARGET_ENOSYS;
4344 #ifdef __NR_msgrcv
4345     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4346 #endif
4347 #ifdef __NR_ipc
4348     if (ret == -TARGET_ENOSYS) {
4349         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4350                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4351     }
4352 #endif
4353 
4354     if (ret > 0) {
4355         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4356         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4357         if (!target_mtext) {
4358             ret = -TARGET_EFAULT;
4359             goto end;
4360         }
4361         memcpy(target_mb->mtext, host_mb->mtext, ret);
4362         unlock_user(target_mtext, target_mtext_addr, ret);
4363     }
4364 
4365     target_mb->mtype = tswapal(host_mb->mtype);
4366 
4367 end:
4368     if (target_mb)
4369         unlock_user_struct(target_mb, msgp, 1);
4370     g_free(host_mb);
4371     return ret;
4372 }
4373 
target_to_host_shmid_ds(struct shmid_ds * host_sd,abi_ulong target_addr)4374 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4375                                                abi_ulong target_addr)
4376 {
4377     struct target_shmid_ds *target_sd;
4378 
4379     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4380         return -TARGET_EFAULT;
4381     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4382         return -TARGET_EFAULT;
4383     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4384     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4385     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4386     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4387     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4388     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4389     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4390     unlock_user_struct(target_sd, target_addr, 0);
4391     return 0;
4392 }
4393 
host_to_target_shmid_ds(abi_ulong target_addr,struct shmid_ds * host_sd)4394 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4395                                                struct shmid_ds *host_sd)
4396 {
4397     struct target_shmid_ds *target_sd;
4398 
4399     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4400         return -TARGET_EFAULT;
4401     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4402         return -TARGET_EFAULT;
4403     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4404     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4405     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4406     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4407     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4408     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4409     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4410     unlock_user_struct(target_sd, target_addr, 1);
4411     return 0;
4412 }
4413 
4414 struct  target_shminfo {
4415     abi_ulong shmmax;
4416     abi_ulong shmmin;
4417     abi_ulong shmmni;
4418     abi_ulong shmseg;
4419     abi_ulong shmall;
4420 };
4421 
host_to_target_shminfo(abi_ulong target_addr,struct shminfo * host_shminfo)4422 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4423                                               struct shminfo *host_shminfo)
4424 {
4425     struct target_shminfo *target_shminfo;
4426     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4427         return -TARGET_EFAULT;
4428     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4429     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4430     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4431     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4432     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4433     unlock_user_struct(target_shminfo, target_addr, 1);
4434     return 0;
4435 }
4436 
4437 struct target_shm_info {
4438     int used_ids;
4439     abi_ulong shm_tot;
4440     abi_ulong shm_rss;
4441     abi_ulong shm_swp;
4442     abi_ulong swap_attempts;
4443     abi_ulong swap_successes;
4444 };
4445 
host_to_target_shm_info(abi_ulong target_addr,struct shm_info * host_shm_info)4446 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4447                                                struct shm_info *host_shm_info)
4448 {
4449     struct target_shm_info *target_shm_info;
4450     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4451         return -TARGET_EFAULT;
4452     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4453     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4454     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4455     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4456     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4457     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4458     unlock_user_struct(target_shm_info, target_addr, 1);
4459     return 0;
4460 }
4461 
do_shmctl(int shmid,int cmd,abi_long buf)4462 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4463 {
4464     struct shmid_ds dsarg;
4465     struct shminfo shminfo;
4466     struct shm_info shm_info;
4467     abi_long ret = -TARGET_EINVAL;
4468 
4469     cmd &= 0xff;
4470 
4471     switch(cmd) {
4472     case IPC_STAT:
4473     case IPC_SET:
4474     case SHM_STAT:
4475         if (target_to_host_shmid_ds(&dsarg, buf))
4476             return -TARGET_EFAULT;
4477         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4478         if (host_to_target_shmid_ds(buf, &dsarg))
4479             return -TARGET_EFAULT;
4480         break;
4481     case IPC_INFO:
4482         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4483         if (host_to_target_shminfo(buf, &shminfo))
4484             return -TARGET_EFAULT;
4485         break;
4486     case SHM_INFO:
4487         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4488         if (host_to_target_shm_info(buf, &shm_info))
4489             return -TARGET_EFAULT;
4490         break;
4491     case IPC_RMID:
4492     case SHM_LOCK:
4493     case SHM_UNLOCK:
4494         ret = get_errno(shmctl(shmid, cmd, NULL));
4495         break;
4496     }
4497 
4498     return ret;
4499 }
4500 
4501 #ifdef TARGET_NR_ipc
4502 /* ??? This only works with linear mappings.  */
4503 /* do_ipc() must return target values and target errnos. */
do_ipc(CPUArchState * cpu_env,unsigned int call,abi_long first,abi_long second,abi_long third,abi_long ptr,abi_long fifth)4504 static abi_long do_ipc(CPUArchState *cpu_env,
4505                        unsigned int call, abi_long first,
4506                        abi_long second, abi_long third,
4507                        abi_long ptr, abi_long fifth)
4508 {
4509     int version;
4510     abi_long ret = 0;
4511 
4512     version = call >> 16;
4513     call &= 0xffff;
4514 
4515     switch (call) {
4516     case IPCOP_semop:
4517         ret = do_semtimedop(first, ptr, second, 0, false);
4518         break;
4519     case IPCOP_semtimedop:
4520     /*
4521      * The s390 sys_ipc variant has only five parameters instead of six
4522      * (as for default variant) and the only difference is the handling of
4523      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4524      * to a struct timespec where the generic variant uses fifth parameter.
4525      */
4526 #if defined(TARGET_S390X)
4527         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4528 #else
4529         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4530 #endif
4531         break;
4532 
4533     case IPCOP_semget:
4534         ret = get_errno(semget(first, second, third));
4535         break;
4536 
4537     case IPCOP_semctl: {
4538         /* The semun argument to semctl is passed by value, so dereference the
4539          * ptr argument. */
4540         abi_ulong atptr;
4541         get_user_ual(atptr, ptr);
4542         ret = do_semctl(first, second, third, atptr);
4543         break;
4544     }
4545 
4546     case IPCOP_msgget:
4547         ret = get_errno(msgget(first, second));
4548         break;
4549 
4550     case IPCOP_msgsnd:
4551         ret = do_msgsnd(first, ptr, second, third);
4552         break;
4553 
4554     case IPCOP_msgctl:
4555         ret = do_msgctl(first, second, ptr);
4556         break;
4557 
4558     case IPCOP_msgrcv:
4559         switch (version) {
4560         case 0:
4561             {
4562                 struct target_ipc_kludge {
4563                     abi_long msgp;
4564                     abi_long msgtyp;
4565                 } *tmp;
4566 
4567                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4568                     ret = -TARGET_EFAULT;
4569                     break;
4570                 }
4571 
4572                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4573 
4574                 unlock_user_struct(tmp, ptr, 0);
4575                 break;
4576             }
4577         default:
4578             ret = do_msgrcv(first, ptr, second, fifth, third);
4579         }
4580         break;
4581 
4582     case IPCOP_shmat:
4583         switch (version) {
4584         default:
4585         {
4586             abi_ulong raddr;
4587             raddr = target_shmat(cpu_env, first, ptr, second);
4588             if (is_error(raddr))
4589                 return get_errno(raddr);
4590             if (put_user_ual(raddr, third))
4591                 return -TARGET_EFAULT;
4592             break;
4593         }
4594         case 1:
4595             ret = -TARGET_EINVAL;
4596             break;
4597         }
4598 	break;
4599     case IPCOP_shmdt:
4600         ret = target_shmdt(ptr);
4601 	break;
4602 
4603     case IPCOP_shmget:
4604 	/* IPC_* flag values are the same on all linux platforms */
4605 	ret = get_errno(shmget(first, second, third));
4606 	break;
4607 
4608 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4609     case IPCOP_shmctl:
4610         ret = do_shmctl(first, second, ptr);
4611         break;
4612     default:
4613         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4614                       call, version);
4615 	ret = -TARGET_ENOSYS;
4616 	break;
4617     }
4618     return ret;
4619 }
4620 #endif
4621 
4622 /* kernel structure types definitions */
4623 
4624 #define STRUCT(name, ...) STRUCT_ ## name,
4625 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4626 enum {
4627 #include "syscall_types.h"
4628 STRUCT_MAX
4629 };
4630 #undef STRUCT
4631 #undef STRUCT_SPECIAL
4632 
4633 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4634 #define STRUCT_SPECIAL(name)
4635 #include "syscall_types.h"
4636 #undef STRUCT
4637 #undef STRUCT_SPECIAL
4638 
4639 #define MAX_STRUCT_SIZE 4096
4640 
4641 #ifdef CONFIG_FIEMAP
4642 /* So fiemap access checks don't overflow on 32 bit systems.
4643  * This is very slightly smaller than the limit imposed by
4644  * the underlying kernel.
4645  */
4646 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4647                             / sizeof(struct fiemap_extent))
4648 
do_ioctl_fs_ioc_fiemap(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)4649 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4650                                        int fd, int cmd, abi_long arg)
4651 {
4652     /* The parameter for this ioctl is a struct fiemap followed
4653      * by an array of struct fiemap_extent whose size is set
4654      * in fiemap->fm_extent_count. The array is filled in by the
4655      * ioctl.
4656      */
4657     int target_size_in, target_size_out;
4658     struct fiemap *fm;
4659     const argtype *arg_type = ie->arg_type;
4660     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4661     void *argptr, *p;
4662     abi_long ret;
4663     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4664     uint32_t outbufsz;
4665     int free_fm = 0;
4666 
4667     assert(arg_type[0] == TYPE_PTR);
4668     assert(ie->access == IOC_RW);
4669     arg_type++;
4670     target_size_in = thunk_type_size(arg_type, 0);
4671     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4672     if (!argptr) {
4673         return -TARGET_EFAULT;
4674     }
4675     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4676     unlock_user(argptr, arg, 0);
4677     fm = (struct fiemap *)buf_temp;
4678     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4679         return -TARGET_EINVAL;
4680     }
4681 
4682     outbufsz = sizeof (*fm) +
4683         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4684 
4685     if (outbufsz > MAX_STRUCT_SIZE) {
4686         /* We can't fit all the extents into the fixed size buffer.
4687          * Allocate one that is large enough and use it instead.
4688          */
4689         fm = g_try_malloc(outbufsz);
4690         if (!fm) {
4691             return -TARGET_ENOMEM;
4692         }
4693         memcpy(fm, buf_temp, sizeof(struct fiemap));
4694         free_fm = 1;
4695     }
4696     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4697     if (!is_error(ret)) {
4698         target_size_out = target_size_in;
4699         /* An extent_count of 0 means we were only counting the extents
4700          * so there are no structs to copy
4701          */
4702         if (fm->fm_extent_count != 0) {
4703             target_size_out += fm->fm_mapped_extents * extent_size;
4704         }
4705         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4706         if (!argptr) {
4707             ret = -TARGET_EFAULT;
4708         } else {
4709             /* Convert the struct fiemap */
4710             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4711             if (fm->fm_extent_count != 0) {
4712                 p = argptr + target_size_in;
4713                 /* ...and then all the struct fiemap_extents */
4714                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4715                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4716                                   THUNK_TARGET);
4717                     p += extent_size;
4718                 }
4719             }
4720             unlock_user(argptr, arg, target_size_out);
4721         }
4722     }
4723     if (free_fm) {
4724         g_free(fm);
4725     }
4726     return ret;
4727 }
4728 #endif
4729 
do_ioctl_ifconf(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)4730 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4731                                 int fd, int cmd, abi_long arg)
4732 {
4733     const argtype *arg_type = ie->arg_type;
4734     int target_size;
4735     void *argptr;
4736     int ret;
4737     struct ifconf *host_ifconf;
4738     uint32_t outbufsz;
4739     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4740     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4741     int target_ifreq_size;
4742     int nb_ifreq;
4743     int free_buf = 0;
4744     int i;
4745     int target_ifc_len;
4746     abi_long target_ifc_buf;
4747     int host_ifc_len;
4748     char *host_ifc_buf;
4749 
4750     assert(arg_type[0] == TYPE_PTR);
4751     assert(ie->access == IOC_RW);
4752 
4753     arg_type++;
4754     target_size = thunk_type_size(arg_type, 0);
4755 
4756     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4757     if (!argptr)
4758         return -TARGET_EFAULT;
4759     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4760     unlock_user(argptr, arg, 0);
4761 
4762     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4763     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4764     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4765 
4766     if (target_ifc_buf != 0) {
4767         target_ifc_len = host_ifconf->ifc_len;
4768         nb_ifreq = target_ifc_len / target_ifreq_size;
4769         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4770 
4771         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4772         if (outbufsz > MAX_STRUCT_SIZE) {
4773             /*
4774              * We can't fit all the extents into the fixed size buffer.
4775              * Allocate one that is large enough and use it instead.
4776              */
4777             host_ifconf = g_try_malloc(outbufsz);
4778             if (!host_ifconf) {
4779                 return -TARGET_ENOMEM;
4780             }
4781             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4782             free_buf = 1;
4783         }
4784         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4785 
4786         host_ifconf->ifc_len = host_ifc_len;
4787     } else {
4788       host_ifc_buf = NULL;
4789     }
4790     host_ifconf->ifc_buf = host_ifc_buf;
4791 
4792     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4793     if (!is_error(ret)) {
4794 	/* convert host ifc_len to target ifc_len */
4795 
4796         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4797         target_ifc_len = nb_ifreq * target_ifreq_size;
4798         host_ifconf->ifc_len = target_ifc_len;
4799 
4800 	/* restore target ifc_buf */
4801 
4802         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4803 
4804 	/* copy struct ifconf to target user */
4805 
4806         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4807         if (!argptr)
4808             return -TARGET_EFAULT;
4809         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4810         unlock_user(argptr, arg, target_size);
4811 
4812         if (target_ifc_buf != 0) {
4813             /* copy ifreq[] to target user */
4814             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4815             for (i = 0; i < nb_ifreq ; i++) {
4816                 thunk_convert(argptr + i * target_ifreq_size,
4817                               host_ifc_buf + i * sizeof(struct ifreq),
4818                               ifreq_arg_type, THUNK_TARGET);
4819             }
4820             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4821         }
4822     }
4823 
4824     if (free_buf) {
4825         g_free(host_ifconf);
4826     }
4827 
4828     return ret;
4829 }
4830 
4831 #if defined(CONFIG_USBFS)
4832 #if HOST_LONG_BITS > 64
4833 #error USBDEVFS thunks do not support >64 bit hosts yet.
4834 #endif
4835 struct live_urb {
4836     uint64_t target_urb_adr;
4837     uint64_t target_buf_adr;
4838     char *target_buf_ptr;
4839     struct usbdevfs_urb host_urb;
4840 };
4841 
usbdevfs_urb_hashtable(void)4842 static GHashTable *usbdevfs_urb_hashtable(void)
4843 {
4844     static GHashTable *urb_hashtable;
4845 
4846     if (!urb_hashtable) {
4847         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4848     }
4849     return urb_hashtable;
4850 }
4851 
urb_hashtable_insert(struct live_urb * urb)4852 static void urb_hashtable_insert(struct live_urb *urb)
4853 {
4854     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4855     g_hash_table_insert(urb_hashtable, urb, urb);
4856 }
4857 
urb_hashtable_lookup(uint64_t target_urb_adr)4858 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4859 {
4860     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4861     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4862 }
4863 
urb_hashtable_remove(struct live_urb * urb)4864 static void urb_hashtable_remove(struct live_urb *urb)
4865 {
4866     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4867     g_hash_table_remove(urb_hashtable, urb);
4868 }
4869 
4870 static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)4871 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4872                           int fd, int cmd, abi_long arg)
4873 {
4874     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4875     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4876     struct live_urb *lurb;
4877     void *argptr;
4878     uint64_t hurb;
4879     int target_size;
4880     uintptr_t target_urb_adr;
4881     abi_long ret;
4882 
4883     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4884 
4885     memset(buf_temp, 0, sizeof(uint64_t));
4886     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4887     if (is_error(ret)) {
4888         return ret;
4889     }
4890 
4891     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4892     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4893     if (!lurb->target_urb_adr) {
4894         return -TARGET_EFAULT;
4895     }
4896     urb_hashtable_remove(lurb);
4897     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4898         lurb->host_urb.buffer_length);
4899     lurb->target_buf_ptr = NULL;
4900 
4901     /* restore the guest buffer pointer */
4902     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4903 
4904     /* update the guest urb struct */
4905     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4906     if (!argptr) {
4907         g_free(lurb);
4908         return -TARGET_EFAULT;
4909     }
4910     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4911     unlock_user(argptr, lurb->target_urb_adr, target_size);
4912 
4913     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4914     /* write back the urb handle */
4915     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4916     if (!argptr) {
4917         g_free(lurb);
4918         return -TARGET_EFAULT;
4919     }
4920 
4921     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4922     target_urb_adr = lurb->target_urb_adr;
4923     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4924     unlock_user(argptr, arg, target_size);
4925 
4926     g_free(lurb);
4927     return ret;
4928 }
4929 
4930 static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)4931 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4932                              uint8_t *buf_temp __attribute__((unused)),
4933                              int fd, int cmd, abi_long arg)
4934 {
4935     struct live_urb *lurb;
4936 
4937     /* map target address back to host URB with metadata. */
4938     lurb = urb_hashtable_lookup(arg);
4939     if (!lurb) {
4940         return -TARGET_EFAULT;
4941     }
4942     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4943 }
4944 
4945 static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)4946 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4947                             int fd, int cmd, abi_long arg)
4948 {
4949     const argtype *arg_type = ie->arg_type;
4950     int target_size;
4951     abi_long ret;
4952     void *argptr;
4953     int rw_dir;
4954     struct live_urb *lurb;
4955 
4956     /*
4957      * each submitted URB needs to map to a unique ID for the
4958      * kernel, and that unique ID needs to be a pointer to
4959      * host memory.  hence, we need to malloc for each URB.
4960      * isochronous transfers have a variable length struct.
4961      */
4962     arg_type++;
4963     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4964 
4965     /* construct host copy of urb and metadata */
4966     lurb = g_try_new0(struct live_urb, 1);
4967     if (!lurb) {
4968         return -TARGET_ENOMEM;
4969     }
4970 
4971     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4972     if (!argptr) {
4973         g_free(lurb);
4974         return -TARGET_EFAULT;
4975     }
4976     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4977     unlock_user(argptr, arg, 0);
4978 
4979     lurb->target_urb_adr = arg;
4980     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4981 
4982     /* buffer space used depends on endpoint type so lock the entire buffer */
4983     /* control type urbs should check the buffer contents for true direction */
4984     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4985     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4986         lurb->host_urb.buffer_length, 1);
4987     if (lurb->target_buf_ptr == NULL) {
4988         g_free(lurb);
4989         return -TARGET_EFAULT;
4990     }
4991 
4992     /* update buffer pointer in host copy */
4993     lurb->host_urb.buffer = lurb->target_buf_ptr;
4994 
4995     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4996     if (is_error(ret)) {
4997         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4998         g_free(lurb);
4999     } else {
5000         urb_hashtable_insert(lurb);
5001     }
5002 
5003     return ret;
5004 }
5005 #endif /* CONFIG_USBFS */
5006 
do_ioctl_dm(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5007 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5008                             int cmd, abi_long arg)
5009 {
5010     void *argptr;
5011     struct dm_ioctl *host_dm;
5012     abi_long guest_data;
5013     uint32_t guest_data_size;
5014     int target_size;
5015     const argtype *arg_type = ie->arg_type;
5016     abi_long ret;
5017     void *big_buf = NULL;
5018     char *host_data;
5019 
5020     arg_type++;
5021     target_size = thunk_type_size(arg_type, 0);
5022     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5023     if (!argptr) {
5024         ret = -TARGET_EFAULT;
5025         goto out;
5026     }
5027     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5028     unlock_user(argptr, arg, 0);
5029 
5030     /* buf_temp is too small, so fetch things into a bigger buffer */
5031     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5032     memcpy(big_buf, buf_temp, target_size);
5033     buf_temp = big_buf;
5034     host_dm = big_buf;
5035 
5036     guest_data = arg + host_dm->data_start;
5037     if ((guest_data - arg) < 0) {
5038         ret = -TARGET_EINVAL;
5039         goto out;
5040     }
5041     guest_data_size = host_dm->data_size - host_dm->data_start;
5042     host_data = (char*)host_dm + host_dm->data_start;
5043 
5044     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5045     if (!argptr) {
5046         ret = -TARGET_EFAULT;
5047         goto out;
5048     }
5049 
5050     switch (ie->host_cmd) {
5051     case DM_REMOVE_ALL:
5052     case DM_LIST_DEVICES:
5053     case DM_DEV_CREATE:
5054     case DM_DEV_REMOVE:
5055     case DM_DEV_SUSPEND:
5056     case DM_DEV_STATUS:
5057     case DM_DEV_WAIT:
5058     case DM_TABLE_STATUS:
5059     case DM_TABLE_CLEAR:
5060     case DM_TABLE_DEPS:
5061     case DM_LIST_VERSIONS:
5062         /* no input data */
5063         break;
5064     case DM_DEV_RENAME:
5065     case DM_DEV_SET_GEOMETRY:
5066         /* data contains only strings */
5067         memcpy(host_data, argptr, guest_data_size);
5068         break;
5069     case DM_TARGET_MSG:
5070         memcpy(host_data, argptr, guest_data_size);
5071         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5072         break;
5073     case DM_TABLE_LOAD:
5074     {
5075         void *gspec = argptr;
5076         void *cur_data = host_data;
5077         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5078         int spec_size = thunk_type_size(dm_arg_type, 0);
5079         int i;
5080 
5081         for (i = 0; i < host_dm->target_count; i++) {
5082             struct dm_target_spec *spec = cur_data;
5083             uint32_t next;
5084             int slen;
5085 
5086             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5087             slen = strlen((char*)gspec + spec_size) + 1;
5088             next = spec->next;
5089             spec->next = sizeof(*spec) + slen;
5090             strcpy((char*)&spec[1], gspec + spec_size);
5091             gspec += next;
5092             cur_data += spec->next;
5093         }
5094         break;
5095     }
5096     default:
5097         ret = -TARGET_EINVAL;
5098         unlock_user(argptr, guest_data, 0);
5099         goto out;
5100     }
5101     unlock_user(argptr, guest_data, 0);
5102 
5103     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5104     if (!is_error(ret)) {
5105         guest_data = arg + host_dm->data_start;
5106         guest_data_size = host_dm->data_size - host_dm->data_start;
5107         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5108         switch (ie->host_cmd) {
5109         case DM_REMOVE_ALL:
5110         case DM_DEV_CREATE:
5111         case DM_DEV_REMOVE:
5112         case DM_DEV_RENAME:
5113         case DM_DEV_SUSPEND:
5114         case DM_DEV_STATUS:
5115         case DM_TABLE_LOAD:
5116         case DM_TABLE_CLEAR:
5117         case DM_TARGET_MSG:
5118         case DM_DEV_SET_GEOMETRY:
5119             /* no return data */
5120             break;
5121         case DM_LIST_DEVICES:
5122         {
5123             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5124             uint32_t remaining_data = guest_data_size;
5125             void *cur_data = argptr;
5126             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5127             int nl_size = 12; /* can't use thunk_size due to alignment */
5128 
5129             while (1) {
5130                 uint32_t next = nl->next;
5131                 if (next) {
5132                     nl->next = nl_size + (strlen(nl->name) + 1);
5133                 }
5134                 if (remaining_data < nl->next) {
5135                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5136                     break;
5137                 }
5138                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5139                 strcpy(cur_data + nl_size, nl->name);
5140                 cur_data += nl->next;
5141                 remaining_data -= nl->next;
5142                 if (!next) {
5143                     break;
5144                 }
5145                 nl = (void*)nl + next;
5146             }
5147             break;
5148         }
5149         case DM_DEV_WAIT:
5150         case DM_TABLE_STATUS:
5151         {
5152             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5153             void *cur_data = argptr;
5154             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5155             int spec_size = thunk_type_size(dm_arg_type, 0);
5156             int i;
5157 
5158             for (i = 0; i < host_dm->target_count; i++) {
5159                 uint32_t next = spec->next;
5160                 int slen = strlen((char*)&spec[1]) + 1;
5161                 spec->next = (cur_data - argptr) + spec_size + slen;
5162                 if (guest_data_size < spec->next) {
5163                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5164                     break;
5165                 }
5166                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5167                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5168                 cur_data = argptr + spec->next;
5169                 spec = (void*)host_dm + host_dm->data_start + next;
5170             }
5171             break;
5172         }
5173         case DM_TABLE_DEPS:
5174         {
5175             void *hdata = (void*)host_dm + host_dm->data_start;
5176             int count = *(uint32_t*)hdata;
5177             uint64_t *hdev = hdata + 8;
5178             uint64_t *gdev = argptr + 8;
5179             int i;
5180 
5181             *(uint32_t*)argptr = tswap32(count);
5182             for (i = 0; i < count; i++) {
5183                 *gdev = tswap64(*hdev);
5184                 gdev++;
5185                 hdev++;
5186             }
5187             break;
5188         }
5189         case DM_LIST_VERSIONS:
5190         {
5191             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5192             uint32_t remaining_data = guest_data_size;
5193             void *cur_data = argptr;
5194             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5195             int vers_size = thunk_type_size(dm_arg_type, 0);
5196 
5197             while (1) {
5198                 uint32_t next = vers->next;
5199                 if (next) {
5200                     vers->next = vers_size + (strlen(vers->name) + 1);
5201                 }
5202                 if (remaining_data < vers->next) {
5203                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5204                     break;
5205                 }
5206                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5207                 strcpy(cur_data + vers_size, vers->name);
5208                 cur_data += vers->next;
5209                 remaining_data -= vers->next;
5210                 if (!next) {
5211                     break;
5212                 }
5213                 vers = (void*)vers + next;
5214             }
5215             break;
5216         }
5217         default:
5218             unlock_user(argptr, guest_data, 0);
5219             ret = -TARGET_EINVAL;
5220             goto out;
5221         }
5222         unlock_user(argptr, guest_data, guest_data_size);
5223 
5224         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5225         if (!argptr) {
5226             ret = -TARGET_EFAULT;
5227             goto out;
5228         }
5229         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5230         unlock_user(argptr, arg, target_size);
5231     }
5232 out:
5233     g_free(big_buf);
5234     return ret;
5235 }
5236 
do_ioctl_blkpg(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5237 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5238                                int cmd, abi_long arg)
5239 {
5240     void *argptr;
5241     int target_size;
5242     const argtype *arg_type = ie->arg_type;
5243     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5244     abi_long ret;
5245 
5246     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5247     struct blkpg_partition host_part;
5248 
5249     /* Read and convert blkpg */
5250     arg_type++;
5251     target_size = thunk_type_size(arg_type, 0);
5252     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5253     if (!argptr) {
5254         ret = -TARGET_EFAULT;
5255         goto out;
5256     }
5257     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5258     unlock_user(argptr, arg, 0);
5259 
5260     switch (host_blkpg->op) {
5261     case BLKPG_ADD_PARTITION:
5262     case BLKPG_DEL_PARTITION:
5263         /* payload is struct blkpg_partition */
5264         break;
5265     default:
5266         /* Unknown opcode */
5267         ret = -TARGET_EINVAL;
5268         goto out;
5269     }
5270 
5271     /* Read and convert blkpg->data */
5272     arg = (abi_long)(uintptr_t)host_blkpg->data;
5273     target_size = thunk_type_size(part_arg_type, 0);
5274     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5275     if (!argptr) {
5276         ret = -TARGET_EFAULT;
5277         goto out;
5278     }
5279     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5280     unlock_user(argptr, arg, 0);
5281 
5282     /* Swizzle the data pointer to our local copy and call! */
5283     host_blkpg->data = &host_part;
5284     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5285 
5286 out:
5287     return ret;
5288 }
5289 
do_ioctl_rt(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5290 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5291                                 int fd, int cmd, abi_long arg)
5292 {
5293     const argtype *arg_type = ie->arg_type;
5294     const StructEntry *se;
5295     const argtype *field_types;
5296     const int *dst_offsets, *src_offsets;
5297     int target_size;
5298     void *argptr;
5299     abi_ulong *target_rt_dev_ptr = NULL;
5300     unsigned long *host_rt_dev_ptr = NULL;
5301     abi_long ret;
5302     int i;
5303 
5304     assert(ie->access == IOC_W);
5305     assert(*arg_type == TYPE_PTR);
5306     arg_type++;
5307     assert(*arg_type == TYPE_STRUCT);
5308     target_size = thunk_type_size(arg_type, 0);
5309     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5310     if (!argptr) {
5311         return -TARGET_EFAULT;
5312     }
5313     arg_type++;
5314     assert(*arg_type == (int)STRUCT_rtentry);
5315     se = struct_entries + *arg_type++;
5316     assert(se->convert[0] == NULL);
5317     /* convert struct here to be able to catch rt_dev string */
5318     field_types = se->field_types;
5319     dst_offsets = se->field_offsets[THUNK_HOST];
5320     src_offsets = se->field_offsets[THUNK_TARGET];
5321     for (i = 0; i < se->nb_fields; i++) {
5322         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5323             assert(*field_types == TYPE_PTRVOID);
5324             target_rt_dev_ptr = argptr + src_offsets[i];
5325             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5326             if (*target_rt_dev_ptr != 0) {
5327                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5328                                                   tswapal(*target_rt_dev_ptr));
5329                 if (!*host_rt_dev_ptr) {
5330                     unlock_user(argptr, arg, 0);
5331                     return -TARGET_EFAULT;
5332                 }
5333             } else {
5334                 *host_rt_dev_ptr = 0;
5335             }
5336             field_types++;
5337             continue;
5338         }
5339         field_types = thunk_convert(buf_temp + dst_offsets[i],
5340                                     argptr + src_offsets[i],
5341                                     field_types, THUNK_HOST);
5342     }
5343     unlock_user(argptr, arg, 0);
5344 
5345     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5346 
5347     assert(host_rt_dev_ptr != NULL);
5348     assert(target_rt_dev_ptr != NULL);
5349     if (*host_rt_dev_ptr != 0) {
5350         unlock_user((void *)*host_rt_dev_ptr,
5351                     *target_rt_dev_ptr, 0);
5352     }
5353     return ret;
5354 }
5355 
do_ioctl_kdsigaccept(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5356 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5357                                      int fd, int cmd, abi_long arg)
5358 {
5359     int sig = target_to_host_signal(arg);
5360     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5361 }
5362 
do_ioctl_SIOCGSTAMP(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5363 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5364                                     int fd, int cmd, abi_long arg)
5365 {
5366     struct timeval tv;
5367     abi_long ret;
5368 
5369     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5370     if (is_error(ret)) {
5371         return ret;
5372     }
5373 
5374     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5375         if (copy_to_user_timeval(arg, &tv)) {
5376             return -TARGET_EFAULT;
5377         }
5378     } else {
5379         if (copy_to_user_timeval64(arg, &tv)) {
5380             return -TARGET_EFAULT;
5381         }
5382     }
5383 
5384     return ret;
5385 }
5386 
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5387 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5388                                       int fd, int cmd, abi_long arg)
5389 {
5390     struct timespec ts;
5391     abi_long ret;
5392 
5393     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5394     if (is_error(ret)) {
5395         return ret;
5396     }
5397 
5398     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5399         if (host_to_target_timespec(arg, &ts)) {
5400             return -TARGET_EFAULT;
5401         }
5402     } else{
5403         if (host_to_target_timespec64(arg, &ts)) {
5404             return -TARGET_EFAULT;
5405         }
5406     }
5407 
5408     return ret;
5409 }
5410 
5411 #ifdef TIOCGPTPEER
do_ioctl_tiocgptpeer(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5412 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5413                                      int fd, int cmd, abi_long arg)
5414 {
5415     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5416     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5417 }
5418 #endif
5419 
5420 #ifdef HAVE_DRM_H
5421 
unlock_drm_version(struct drm_version * host_ver,struct target_drm_version * target_ver,bool copy)5422 static void unlock_drm_version(struct drm_version *host_ver,
5423                                struct target_drm_version *target_ver,
5424                                bool copy)
5425 {
5426     unlock_user(host_ver->name, target_ver->name,
5427                                 copy ? host_ver->name_len : 0);
5428     unlock_user(host_ver->date, target_ver->date,
5429                                 copy ? host_ver->date_len : 0);
5430     unlock_user(host_ver->desc, target_ver->desc,
5431                                 copy ? host_ver->desc_len : 0);
5432 }
5433 
target_to_host_drmversion(struct drm_version * host_ver,struct target_drm_version * target_ver)5434 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5435                                           struct target_drm_version *target_ver)
5436 {
5437     memset(host_ver, 0, sizeof(*host_ver));
5438 
5439     __get_user(host_ver->name_len, &target_ver->name_len);
5440     if (host_ver->name_len) {
5441         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5442                                    target_ver->name_len, 0);
5443         if (!host_ver->name) {
5444             return -EFAULT;
5445         }
5446     }
5447 
5448     __get_user(host_ver->date_len, &target_ver->date_len);
5449     if (host_ver->date_len) {
5450         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5451                                    target_ver->date_len, 0);
5452         if (!host_ver->date) {
5453             goto err;
5454         }
5455     }
5456 
5457     __get_user(host_ver->desc_len, &target_ver->desc_len);
5458     if (host_ver->desc_len) {
5459         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5460                                    target_ver->desc_len, 0);
5461         if (!host_ver->desc) {
5462             goto err;
5463         }
5464     }
5465 
5466     return 0;
5467 err:
5468     unlock_drm_version(host_ver, target_ver, false);
5469     return -EFAULT;
5470 }
5471 
host_to_target_drmversion(struct target_drm_version * target_ver,struct drm_version * host_ver)5472 static inline void host_to_target_drmversion(
5473                                           struct target_drm_version *target_ver,
5474                                           struct drm_version *host_ver)
5475 {
5476     __put_user(host_ver->version_major, &target_ver->version_major);
5477     __put_user(host_ver->version_minor, &target_ver->version_minor);
5478     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5479     __put_user(host_ver->name_len, &target_ver->name_len);
5480     __put_user(host_ver->date_len, &target_ver->date_len);
5481     __put_user(host_ver->desc_len, &target_ver->desc_len);
5482     unlock_drm_version(host_ver, target_ver, true);
5483 }
5484 
do_ioctl_drm(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5485 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5486                              int fd, int cmd, abi_long arg)
5487 {
5488     struct drm_version *ver;
5489     struct target_drm_version *target_ver;
5490     abi_long ret;
5491 
5492     switch (ie->host_cmd) {
5493     case DRM_IOCTL_VERSION:
5494         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5495             return -TARGET_EFAULT;
5496         }
5497         ver = (struct drm_version *)buf_temp;
5498         ret = target_to_host_drmversion(ver, target_ver);
5499         if (!is_error(ret)) {
5500             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5501             if (is_error(ret)) {
5502                 unlock_drm_version(ver, target_ver, false);
5503             } else {
5504                 host_to_target_drmversion(target_ver, ver);
5505             }
5506         }
5507         unlock_user_struct(target_ver, arg, 0);
5508         return ret;
5509     }
5510     return -TARGET_ENOSYS;
5511 }
5512 
do_ioctl_drm_i915_getparam(const IOCTLEntry * ie,struct drm_i915_getparam * gparam,int fd,abi_long arg)5513 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5514                                            struct drm_i915_getparam *gparam,
5515                                            int fd, abi_long arg)
5516 {
5517     abi_long ret;
5518     int value;
5519     struct target_drm_i915_getparam *target_gparam;
5520 
5521     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5522         return -TARGET_EFAULT;
5523     }
5524 
5525     __get_user(gparam->param, &target_gparam->param);
5526     gparam->value = &value;
5527     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5528     put_user_s32(value, target_gparam->value);
5529 
5530     unlock_user_struct(target_gparam, arg, 0);
5531     return ret;
5532 }
5533 
do_ioctl_drm_i915(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5534 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5535                                   int fd, int cmd, abi_long arg)
5536 {
5537     switch (ie->host_cmd) {
5538     case DRM_IOCTL_I915_GETPARAM:
5539         return do_ioctl_drm_i915_getparam(ie,
5540                                           (struct drm_i915_getparam *)buf_temp,
5541                                           fd, arg);
5542     default:
5543         return -TARGET_ENOSYS;
5544     }
5545 }
5546 
5547 #endif
5548 
do_ioctl_TUNSETTXFILTER(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5549 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5550                                         int fd, int cmd, abi_long arg)
5551 {
5552     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5553     struct tun_filter *target_filter;
5554     char *target_addr;
5555 
5556     assert(ie->access == IOC_W);
5557 
5558     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5559     if (!target_filter) {
5560         return -TARGET_EFAULT;
5561     }
5562     filter->flags = tswap16(target_filter->flags);
5563     filter->count = tswap16(target_filter->count);
5564     unlock_user(target_filter, arg, 0);
5565 
5566     if (filter->count) {
5567         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5568             MAX_STRUCT_SIZE) {
5569             return -TARGET_EFAULT;
5570         }
5571 
5572         target_addr = lock_user(VERIFY_READ,
5573                                 arg + offsetof(struct tun_filter, addr),
5574                                 filter->count * ETH_ALEN, 1);
5575         if (!target_addr) {
5576             return -TARGET_EFAULT;
5577         }
5578         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5579         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5580     }
5581 
5582     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5583 }
5584 
5585 IOCTLEntry ioctl_entries[] = {
5586 #define IOCTL(cmd, access, ...) \
5587     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5588 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5589     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5590 #define IOCTL_IGNORE(cmd) \
5591     { TARGET_ ## cmd, 0, #cmd },
5592 #include "ioctls.h"
5593     { 0, 0, },
5594 };
5595 
5596 /* ??? Implement proper locking for ioctls.  */
5597 /* do_ioctl() Must return target values and target errnos. */
do_ioctl(int fd,int cmd,abi_long arg)5598 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5599 {
5600     const IOCTLEntry *ie;
5601     const argtype *arg_type;
5602     abi_long ret;
5603     uint8_t buf_temp[MAX_STRUCT_SIZE];
5604     int target_size;
5605     void *argptr;
5606 
5607     ie = ioctl_entries;
5608     for(;;) {
5609         if (ie->target_cmd == 0) {
5610             qemu_log_mask(
5611                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5612             return -TARGET_ENOTTY;
5613         }
5614         if (ie->target_cmd == cmd)
5615             break;
5616         ie++;
5617     }
5618     arg_type = ie->arg_type;
5619     if (ie->do_ioctl) {
5620         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5621     } else if (!ie->host_cmd) {
5622         /* Some architectures define BSD ioctls in their headers
5623            that are not implemented in Linux.  */
5624         return -TARGET_ENOTTY;
5625     }
5626 
5627     switch(arg_type[0]) {
5628     case TYPE_NULL:
5629         /* no argument */
5630         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5631         break;
5632     case TYPE_PTRVOID:
5633     case TYPE_INT:
5634     case TYPE_LONG:
5635     case TYPE_ULONG:
5636         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5637         break;
5638     case TYPE_PTR:
5639         arg_type++;
5640         target_size = thunk_type_size(arg_type, 0);
5641         switch(ie->access) {
5642         case IOC_R:
5643             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5644             if (!is_error(ret)) {
5645                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5646                 if (!argptr)
5647                     return -TARGET_EFAULT;
5648                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5649                 unlock_user(argptr, arg, target_size);
5650             }
5651             break;
5652         case IOC_W:
5653             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5654             if (!argptr)
5655                 return -TARGET_EFAULT;
5656             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5657             unlock_user(argptr, arg, 0);
5658             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5659             break;
5660         default:
5661         case IOC_RW:
5662             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5663             if (!argptr)
5664                 return -TARGET_EFAULT;
5665             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5666             unlock_user(argptr, arg, 0);
5667             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5668             if (!is_error(ret)) {
5669                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5670                 if (!argptr)
5671                     return -TARGET_EFAULT;
5672                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5673                 unlock_user(argptr, arg, target_size);
5674             }
5675             break;
5676         }
5677         break;
5678     default:
5679         qemu_log_mask(LOG_UNIMP,
5680                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5681                       (long)cmd, arg_type[0]);
5682         ret = -TARGET_ENOTTY;
5683         break;
5684     }
5685     return ret;
5686 }
5687 
5688 static const bitmask_transtbl iflag_tbl[] = {
5689         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5690         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5691         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5692         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5693         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5694         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5695         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5696         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5697         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5698         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5699         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5700         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5701         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5702         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5703         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5704 };
5705 
5706 static const bitmask_transtbl oflag_tbl[] = {
5707 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5708 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5709 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5710 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5711 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5712 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5713 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5714 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5715 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5716 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5717 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5718 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5719 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5720 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5721 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5722 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5723 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5724 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5725 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5726 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5727 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5728 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5729 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5730 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5731 };
5732 
5733 static const bitmask_transtbl cflag_tbl[] = {
5734 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5735 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5736 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5737 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5738 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5739 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5740 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5741 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5742 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5743 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5744 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5745 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5746 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5747 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5748 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5749 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5750 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5751 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5752 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5753 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5754 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5755 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5756 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5757 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5758 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5759 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5760 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5761 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5762 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5763 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5764 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5765 };
5766 
5767 static const bitmask_transtbl lflag_tbl[] = {
5768   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5769   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5770   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5771   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5772   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5773   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5774   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5775   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5776   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5777   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5778   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5779   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5780   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5781   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5782   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5783   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5784 };
5785 
target_to_host_termios(void * dst,const void * src)5786 static void target_to_host_termios (void *dst, const void *src)
5787 {
5788     struct host_termios *host = dst;
5789     const struct target_termios *target = src;
5790 
5791     host->c_iflag =
5792         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5793     host->c_oflag =
5794         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5795     host->c_cflag =
5796         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5797     host->c_lflag =
5798         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5799     host->c_line = target->c_line;
5800 
5801     memset(host->c_cc, 0, sizeof(host->c_cc));
5802     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5803     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5804     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5805     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5806     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5807     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5808     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5809     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5810     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5811     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5812     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5813     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5814     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5815     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5816     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5817     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5818     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5819 }
5820 
host_to_target_termios(void * dst,const void * src)5821 static void host_to_target_termios (void *dst, const void *src)
5822 {
5823     struct target_termios *target = dst;
5824     const struct host_termios *host = src;
5825 
5826     target->c_iflag =
5827         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5828     target->c_oflag =
5829         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5830     target->c_cflag =
5831         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5832     target->c_lflag =
5833         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5834     target->c_line = host->c_line;
5835 
5836     memset(target->c_cc, 0, sizeof(target->c_cc));
5837     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5838     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5839     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5840     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5841     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5842     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5843     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5844     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5845     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5846     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5847     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5848     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5849     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5850     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5851     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5852     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5853     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5854 }
5855 
5856 static const StructEntry struct_termios_def = {
5857     .convert = { host_to_target_termios, target_to_host_termios },
5858     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5859     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5860     .print = print_termios,
5861 };
5862 
5863 /* If the host does not provide these bits, they may be safely discarded. */
5864 #ifndef MAP_SYNC
5865 #define MAP_SYNC 0
5866 #endif
5867 #ifndef MAP_UNINITIALIZED
5868 #define MAP_UNINITIALIZED 0
5869 #endif
5870 
5871 static const bitmask_transtbl mmap_flags_tbl[] = {
5872     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5873     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5874       MAP_ANONYMOUS, MAP_ANONYMOUS },
5875     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5876       MAP_GROWSDOWN, MAP_GROWSDOWN },
5877     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5878       MAP_DENYWRITE, MAP_DENYWRITE },
5879     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5880       MAP_EXECUTABLE, MAP_EXECUTABLE },
5881     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5882     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5883       MAP_NORESERVE, MAP_NORESERVE },
5884     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5885     /* MAP_STACK had been ignored by the kernel for quite some time.
5886        Recognize it for the target insofar as we do not want to pass
5887        it through to the host.  */
5888     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5889     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5890     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5891     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5892       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5893     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5894       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5895 };
5896 
5897 /*
5898  * Arrange for legacy / undefined architecture specific flags to be
5899  * ignored by mmap handling code.
5900  */
5901 #ifndef TARGET_MAP_32BIT
5902 #define TARGET_MAP_32BIT 0
5903 #endif
5904 #ifndef TARGET_MAP_HUGE_2MB
5905 #define TARGET_MAP_HUGE_2MB 0
5906 #endif
5907 #ifndef TARGET_MAP_HUGE_1GB
5908 #define TARGET_MAP_HUGE_1GB 0
5909 #endif
5910 
do_mmap(abi_ulong addr,abi_ulong len,int prot,int target_flags,int fd,off_t offset)5911 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5912                         int target_flags, int fd, off_t offset)
5913 {
5914     /*
5915      * The historical set of flags that all mmap types implicitly support.
5916      */
5917     enum {
5918         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5919                                | TARGET_MAP_PRIVATE
5920                                | TARGET_MAP_FIXED
5921                                | TARGET_MAP_ANONYMOUS
5922                                | TARGET_MAP_DENYWRITE
5923                                | TARGET_MAP_EXECUTABLE
5924                                | TARGET_MAP_UNINITIALIZED
5925                                | TARGET_MAP_GROWSDOWN
5926                                | TARGET_MAP_LOCKED
5927                                | TARGET_MAP_NORESERVE
5928                                | TARGET_MAP_POPULATE
5929                                | TARGET_MAP_NONBLOCK
5930                                | TARGET_MAP_STACK
5931                                | TARGET_MAP_HUGETLB
5932                                | TARGET_MAP_32BIT
5933                                | TARGET_MAP_HUGE_2MB
5934                                | TARGET_MAP_HUGE_1GB
5935     };
5936     int host_flags;
5937 
5938     switch (target_flags & TARGET_MAP_TYPE) {
5939     case TARGET_MAP_PRIVATE:
5940         host_flags = MAP_PRIVATE;
5941         break;
5942     case TARGET_MAP_SHARED:
5943         host_flags = MAP_SHARED;
5944         break;
5945     case TARGET_MAP_SHARED_VALIDATE:
5946         /*
5947          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5948          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5949          */
5950         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5951             return -TARGET_EOPNOTSUPP;
5952         }
5953         host_flags = MAP_SHARED_VALIDATE;
5954         if (target_flags & TARGET_MAP_SYNC) {
5955             host_flags |= MAP_SYNC;
5956         }
5957         break;
5958     default:
5959         return -TARGET_EINVAL;
5960     }
5961     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5962 
5963     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5964 }
5965 
5966 /*
5967  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5968  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5969  */
5970 #if defined(TARGET_I386)
5971 
5972 /* NOTE: there is really one LDT for all the threads */
5973 static uint8_t *ldt_table;
5974 
read_ldt(abi_ulong ptr,unsigned long bytecount)5975 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5976 {
5977     int size;
5978     void *p;
5979 
5980     if (!ldt_table)
5981         return 0;
5982     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5983     if (size > bytecount)
5984         size = bytecount;
5985     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5986     if (!p)
5987         return -TARGET_EFAULT;
5988     /* ??? Should this by byteswapped?  */
5989     memcpy(p, ldt_table, size);
5990     unlock_user(p, ptr, size);
5991     return size;
5992 }
5993 
5994 /* XXX: add locking support */
write_ldt(CPUX86State * env,abi_ulong ptr,unsigned long bytecount,int oldmode)5995 static abi_long write_ldt(CPUX86State *env,
5996                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5997 {
5998     struct target_modify_ldt_ldt_s ldt_info;
5999     struct target_modify_ldt_ldt_s *target_ldt_info;
6000     int seg_32bit, contents, read_exec_only, limit_in_pages;
6001     int seg_not_present, useable, lm;
6002     uint32_t *lp, entry_1, entry_2;
6003 
6004     if (bytecount != sizeof(ldt_info))
6005         return -TARGET_EINVAL;
6006     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6007         return -TARGET_EFAULT;
6008     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6009     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6010     ldt_info.limit = tswap32(target_ldt_info->limit);
6011     ldt_info.flags = tswap32(target_ldt_info->flags);
6012     unlock_user_struct(target_ldt_info, ptr, 0);
6013 
6014     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6015         return -TARGET_EINVAL;
6016     seg_32bit = ldt_info.flags & 1;
6017     contents = (ldt_info.flags >> 1) & 3;
6018     read_exec_only = (ldt_info.flags >> 3) & 1;
6019     limit_in_pages = (ldt_info.flags >> 4) & 1;
6020     seg_not_present = (ldt_info.flags >> 5) & 1;
6021     useable = (ldt_info.flags >> 6) & 1;
6022 #ifdef TARGET_ABI32
6023     lm = 0;
6024 #else
6025     lm = (ldt_info.flags >> 7) & 1;
6026 #endif
6027     if (contents == 3) {
6028         if (oldmode)
6029             return -TARGET_EINVAL;
6030         if (seg_not_present == 0)
6031             return -TARGET_EINVAL;
6032     }
6033     /* allocate the LDT */
6034     if (!ldt_table) {
6035         env->ldt.base = target_mmap(0,
6036                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6037                                     PROT_READ|PROT_WRITE,
6038                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6039         if (env->ldt.base == -1)
6040             return -TARGET_ENOMEM;
6041         memset(g2h_untagged(env->ldt.base), 0,
6042                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6043         env->ldt.limit = 0xffff;
6044         ldt_table = g2h_untagged(env->ldt.base);
6045     }
6046 
6047     /* NOTE: same code as Linux kernel */
6048     /* Allow LDTs to be cleared by the user. */
6049     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6050         if (oldmode ||
6051             (contents == 0		&&
6052              read_exec_only == 1	&&
6053              seg_32bit == 0		&&
6054              limit_in_pages == 0	&&
6055              seg_not_present == 1	&&
6056              useable == 0 )) {
6057             entry_1 = 0;
6058             entry_2 = 0;
6059             goto install;
6060         }
6061     }
6062 
6063     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6064         (ldt_info.limit & 0x0ffff);
6065     entry_2 = (ldt_info.base_addr & 0xff000000) |
6066         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6067         (ldt_info.limit & 0xf0000) |
6068         ((read_exec_only ^ 1) << 9) |
6069         (contents << 10) |
6070         ((seg_not_present ^ 1) << 15) |
6071         (seg_32bit << 22) |
6072         (limit_in_pages << 23) |
6073         (lm << 21) |
6074         0x7000;
6075     if (!oldmode)
6076         entry_2 |= (useable << 20);
6077 
6078     /* Install the new entry ...  */
6079 install:
6080     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6081     lp[0] = tswap32(entry_1);
6082     lp[1] = tswap32(entry_2);
6083     return 0;
6084 }
6085 
6086 /* specific and weird i386 syscalls */
do_modify_ldt(CPUX86State * env,int func,abi_ulong ptr,unsigned long bytecount)6087 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6088                               unsigned long bytecount)
6089 {
6090     abi_long ret;
6091 
6092     switch (func) {
6093     case 0:
6094         ret = read_ldt(ptr, bytecount);
6095         break;
6096     case 1:
6097         ret = write_ldt(env, ptr, bytecount, 1);
6098         break;
6099     case 0x11:
6100         ret = write_ldt(env, ptr, bytecount, 0);
6101         break;
6102     default:
6103         ret = -TARGET_ENOSYS;
6104         break;
6105     }
6106     return ret;
6107 }
6108 
6109 #if defined(TARGET_ABI32)
do_set_thread_area(CPUX86State * env,abi_ulong ptr)6110 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6111 {
6112     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6113     struct target_modify_ldt_ldt_s ldt_info;
6114     struct target_modify_ldt_ldt_s *target_ldt_info;
6115     int seg_32bit, contents, read_exec_only, limit_in_pages;
6116     int seg_not_present, useable, lm;
6117     uint32_t *lp, entry_1, entry_2;
6118     int i;
6119 
6120     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6121     if (!target_ldt_info)
6122         return -TARGET_EFAULT;
6123     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6124     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6125     ldt_info.limit = tswap32(target_ldt_info->limit);
6126     ldt_info.flags = tswap32(target_ldt_info->flags);
6127     if (ldt_info.entry_number == -1) {
6128         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6129             if (gdt_table[i] == 0) {
6130                 ldt_info.entry_number = i;
6131                 target_ldt_info->entry_number = tswap32(i);
6132                 break;
6133             }
6134         }
6135     }
6136     unlock_user_struct(target_ldt_info, ptr, 1);
6137 
6138     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6139         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6140            return -TARGET_EINVAL;
6141     seg_32bit = ldt_info.flags & 1;
6142     contents = (ldt_info.flags >> 1) & 3;
6143     read_exec_only = (ldt_info.flags >> 3) & 1;
6144     limit_in_pages = (ldt_info.flags >> 4) & 1;
6145     seg_not_present = (ldt_info.flags >> 5) & 1;
6146     useable = (ldt_info.flags >> 6) & 1;
6147 #ifdef TARGET_ABI32
6148     lm = 0;
6149 #else
6150     lm = (ldt_info.flags >> 7) & 1;
6151 #endif
6152 
6153     if (contents == 3) {
6154         if (seg_not_present == 0)
6155             return -TARGET_EINVAL;
6156     }
6157 
6158     /* NOTE: same code as Linux kernel */
6159     /* Allow LDTs to be cleared by the user. */
6160     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6161         if ((contents == 0             &&
6162              read_exec_only == 1       &&
6163              seg_32bit == 0            &&
6164              limit_in_pages == 0       &&
6165              seg_not_present == 1      &&
6166              useable == 0 )) {
6167             entry_1 = 0;
6168             entry_2 = 0;
6169             goto install;
6170         }
6171     }
6172 
6173     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6174         (ldt_info.limit & 0x0ffff);
6175     entry_2 = (ldt_info.base_addr & 0xff000000) |
6176         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6177         (ldt_info.limit & 0xf0000) |
6178         ((read_exec_only ^ 1) << 9) |
6179         (contents << 10) |
6180         ((seg_not_present ^ 1) << 15) |
6181         (seg_32bit << 22) |
6182         (limit_in_pages << 23) |
6183         (useable << 20) |
6184         (lm << 21) |
6185         0x7000;
6186 
6187     /* Install the new entry ...  */
6188 install:
6189     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6190     lp[0] = tswap32(entry_1);
6191     lp[1] = tswap32(entry_2);
6192     return 0;
6193 }
6194 
do_get_thread_area(CPUX86State * env,abi_ulong ptr)6195 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6196 {
6197     struct target_modify_ldt_ldt_s *target_ldt_info;
6198     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6199     uint32_t base_addr, limit, flags;
6200     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6201     int seg_not_present, useable, lm;
6202     uint32_t *lp, entry_1, entry_2;
6203 
6204     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6205     if (!target_ldt_info)
6206         return -TARGET_EFAULT;
6207     idx = tswap32(target_ldt_info->entry_number);
6208     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6209         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6210         unlock_user_struct(target_ldt_info, ptr, 1);
6211         return -TARGET_EINVAL;
6212     }
6213     lp = (uint32_t *)(gdt_table + idx);
6214     entry_1 = tswap32(lp[0]);
6215     entry_2 = tswap32(lp[1]);
6216 
6217     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6218     contents = (entry_2 >> 10) & 3;
6219     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6220     seg_32bit = (entry_2 >> 22) & 1;
6221     limit_in_pages = (entry_2 >> 23) & 1;
6222     useable = (entry_2 >> 20) & 1;
6223 #ifdef TARGET_ABI32
6224     lm = 0;
6225 #else
6226     lm = (entry_2 >> 21) & 1;
6227 #endif
6228     flags = (seg_32bit << 0) | (contents << 1) |
6229         (read_exec_only << 3) | (limit_in_pages << 4) |
6230         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6231     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6232     base_addr = (entry_1 >> 16) |
6233         (entry_2 & 0xff000000) |
6234         ((entry_2 & 0xff) << 16);
6235     target_ldt_info->base_addr = tswapal(base_addr);
6236     target_ldt_info->limit = tswap32(limit);
6237     target_ldt_info->flags = tswap32(flags);
6238     unlock_user_struct(target_ldt_info, ptr, 1);
6239     return 0;
6240 }
6241 
do_arch_prctl(CPUX86State * env,int code,abi_ulong addr)6242 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6243 {
6244     return -TARGET_ENOSYS;
6245 }
6246 #else
do_arch_prctl(CPUX86State * env,int code,abi_ulong addr)6247 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6248 {
6249     abi_long ret = 0;
6250     abi_ulong val;
6251     int idx;
6252 
6253     switch(code) {
6254     case TARGET_ARCH_SET_GS:
6255     case TARGET_ARCH_SET_FS:
6256         if (code == TARGET_ARCH_SET_GS)
6257             idx = R_GS;
6258         else
6259             idx = R_FS;
6260         cpu_x86_load_seg(env, idx, 0);
6261         env->segs[idx].base = addr;
6262         break;
6263     case TARGET_ARCH_GET_GS:
6264     case TARGET_ARCH_GET_FS:
6265         if (code == TARGET_ARCH_GET_GS)
6266             idx = R_GS;
6267         else
6268             idx = R_FS;
6269         val = env->segs[idx].base;
6270         if (put_user(val, addr, abi_ulong))
6271             ret = -TARGET_EFAULT;
6272         break;
6273     default:
6274         ret = -TARGET_EINVAL;
6275         break;
6276     }
6277     return ret;
6278 }
6279 #endif /* defined(TARGET_ABI32 */
6280 #endif /* defined(TARGET_I386) */
6281 
6282 /*
6283  * These constants are generic.  Supply any that are missing from the host.
6284  */
6285 #ifndef PR_SET_NAME
6286 # define PR_SET_NAME    15
6287 # define PR_GET_NAME    16
6288 #endif
6289 #ifndef PR_SET_FP_MODE
6290 # define PR_SET_FP_MODE 45
6291 # define PR_GET_FP_MODE 46
6292 # define PR_FP_MODE_FR   (1 << 0)
6293 # define PR_FP_MODE_FRE  (1 << 1)
6294 #endif
6295 #ifndef PR_SVE_SET_VL
6296 # define PR_SVE_SET_VL  50
6297 # define PR_SVE_GET_VL  51
6298 # define PR_SVE_VL_LEN_MASK  0xffff
6299 # define PR_SVE_VL_INHERIT   (1 << 17)
6300 #endif
6301 #ifndef PR_PAC_RESET_KEYS
6302 # define PR_PAC_RESET_KEYS  54
6303 # define PR_PAC_APIAKEY   (1 << 0)
6304 # define PR_PAC_APIBKEY   (1 << 1)
6305 # define PR_PAC_APDAKEY   (1 << 2)
6306 # define PR_PAC_APDBKEY   (1 << 3)
6307 # define PR_PAC_APGAKEY   (1 << 4)
6308 #endif
6309 #ifndef PR_SET_TAGGED_ADDR_CTRL
6310 # define PR_SET_TAGGED_ADDR_CTRL 55
6311 # define PR_GET_TAGGED_ADDR_CTRL 56
6312 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6313 #endif
6314 #ifndef PR_SET_IO_FLUSHER
6315 # define PR_SET_IO_FLUSHER 57
6316 # define PR_GET_IO_FLUSHER 58
6317 #endif
6318 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6319 # define PR_SET_SYSCALL_USER_DISPATCH 59
6320 #endif
6321 #ifndef PR_SME_SET_VL
6322 # define PR_SME_SET_VL  63
6323 # define PR_SME_GET_VL  64
6324 # define PR_SME_VL_LEN_MASK  0xffff
6325 # define PR_SME_VL_INHERIT   (1 << 17)
6326 #endif
6327 
6328 #include "target_prctl.h"
6329 
do_prctl_inval0(CPUArchState * env)6330 static abi_long do_prctl_inval0(CPUArchState *env)
6331 {
6332     return -TARGET_EINVAL;
6333 }
6334 
do_prctl_inval1(CPUArchState * env,abi_long arg2)6335 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6336 {
6337     return -TARGET_EINVAL;
6338 }
6339 
6340 #ifndef do_prctl_get_fp_mode
6341 #define do_prctl_get_fp_mode do_prctl_inval0
6342 #endif
6343 #ifndef do_prctl_set_fp_mode
6344 #define do_prctl_set_fp_mode do_prctl_inval1
6345 #endif
6346 #ifndef do_prctl_sve_get_vl
6347 #define do_prctl_sve_get_vl do_prctl_inval0
6348 #endif
6349 #ifndef do_prctl_sve_set_vl
6350 #define do_prctl_sve_set_vl do_prctl_inval1
6351 #endif
6352 #ifndef do_prctl_reset_keys
6353 #define do_prctl_reset_keys do_prctl_inval1
6354 #endif
6355 #ifndef do_prctl_set_tagged_addr_ctrl
6356 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6357 #endif
6358 #ifndef do_prctl_get_tagged_addr_ctrl
6359 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6360 #endif
6361 #ifndef do_prctl_get_unalign
6362 #define do_prctl_get_unalign do_prctl_inval1
6363 #endif
6364 #ifndef do_prctl_set_unalign
6365 #define do_prctl_set_unalign do_prctl_inval1
6366 #endif
6367 #ifndef do_prctl_sme_get_vl
6368 #define do_prctl_sme_get_vl do_prctl_inval0
6369 #endif
6370 #ifndef do_prctl_sme_set_vl
6371 #define do_prctl_sme_set_vl do_prctl_inval1
6372 #endif
6373 
do_prctl(CPUArchState * env,abi_long option,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5)6374 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6375                          abi_long arg3, abi_long arg4, abi_long arg5)
6376 {
6377     abi_long ret;
6378 
6379     switch (option) {
6380     case PR_GET_PDEATHSIG:
6381         {
6382             int deathsig;
6383             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6384                                   arg3, arg4, arg5));
6385             if (!is_error(ret) &&
6386                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6387                 return -TARGET_EFAULT;
6388             }
6389             return ret;
6390         }
6391     case PR_SET_PDEATHSIG:
6392         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6393                                arg3, arg4, arg5));
6394     case PR_GET_NAME:
6395         {
6396             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6397             if (!name) {
6398                 return -TARGET_EFAULT;
6399             }
6400             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6401                                   arg3, arg4, arg5));
6402             unlock_user(name, arg2, 16);
6403             return ret;
6404         }
6405     case PR_SET_NAME:
6406         {
6407             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6408             if (!name) {
6409                 return -TARGET_EFAULT;
6410             }
6411             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6412                                   arg3, arg4, arg5));
6413             unlock_user(name, arg2, 0);
6414             return ret;
6415         }
6416     case PR_GET_FP_MODE:
6417         return do_prctl_get_fp_mode(env);
6418     case PR_SET_FP_MODE:
6419         return do_prctl_set_fp_mode(env, arg2);
6420     case PR_SVE_GET_VL:
6421         return do_prctl_sve_get_vl(env);
6422     case PR_SVE_SET_VL:
6423         return do_prctl_sve_set_vl(env, arg2);
6424     case PR_SME_GET_VL:
6425         return do_prctl_sme_get_vl(env);
6426     case PR_SME_SET_VL:
6427         return do_prctl_sme_set_vl(env, arg2);
6428     case PR_PAC_RESET_KEYS:
6429         if (arg3 || arg4 || arg5) {
6430             return -TARGET_EINVAL;
6431         }
6432         return do_prctl_reset_keys(env, arg2);
6433     case PR_SET_TAGGED_ADDR_CTRL:
6434         if (arg3 || arg4 || arg5) {
6435             return -TARGET_EINVAL;
6436         }
6437         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6438     case PR_GET_TAGGED_ADDR_CTRL:
6439         if (arg2 || arg3 || arg4 || arg5) {
6440             return -TARGET_EINVAL;
6441         }
6442         return do_prctl_get_tagged_addr_ctrl(env);
6443 
6444     case PR_GET_UNALIGN:
6445         return do_prctl_get_unalign(env, arg2);
6446     case PR_SET_UNALIGN:
6447         return do_prctl_set_unalign(env, arg2);
6448 
6449     case PR_CAP_AMBIENT:
6450     case PR_CAPBSET_READ:
6451     case PR_CAPBSET_DROP:
6452     case PR_GET_DUMPABLE:
6453     case PR_SET_DUMPABLE:
6454     case PR_GET_KEEPCAPS:
6455     case PR_SET_KEEPCAPS:
6456     case PR_GET_SECUREBITS:
6457     case PR_SET_SECUREBITS:
6458     case PR_GET_TIMING:
6459     case PR_SET_TIMING:
6460     case PR_GET_TIMERSLACK:
6461     case PR_SET_TIMERSLACK:
6462     case PR_MCE_KILL:
6463     case PR_MCE_KILL_GET:
6464     case PR_GET_NO_NEW_PRIVS:
6465     case PR_SET_NO_NEW_PRIVS:
6466     case PR_GET_IO_FLUSHER:
6467     case PR_SET_IO_FLUSHER:
6468     case PR_SET_CHILD_SUBREAPER:
6469     case PR_GET_SPECULATION_CTRL:
6470     case PR_SET_SPECULATION_CTRL:
6471         /* Some prctl options have no pointer arguments and we can pass on. */
6472         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6473 
6474     case PR_GET_CHILD_SUBREAPER:
6475         {
6476             int val;
6477             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6478                                   arg3, arg4, arg5));
6479             if (!is_error(ret) && put_user_s32(val, arg2)) {
6480                 return -TARGET_EFAULT;
6481             }
6482             return ret;
6483         }
6484 
6485     case PR_GET_TID_ADDRESS:
6486         {
6487             TaskState *ts = get_task_state(env_cpu(env));
6488             return put_user_ual(ts->child_tidptr, arg2);
6489         }
6490 
6491     case PR_GET_FPEXC:
6492     case PR_SET_FPEXC:
6493         /* Was used for SPE on PowerPC. */
6494         return -TARGET_EINVAL;
6495 
6496     case PR_GET_ENDIAN:
6497     case PR_SET_ENDIAN:
6498     case PR_GET_FPEMU:
6499     case PR_SET_FPEMU:
6500     case PR_SET_MM:
6501     case PR_GET_SECCOMP:
6502     case PR_SET_SECCOMP:
6503     case PR_SET_SYSCALL_USER_DISPATCH:
6504     case PR_GET_THP_DISABLE:
6505     case PR_SET_THP_DISABLE:
6506     case PR_GET_TSC:
6507     case PR_SET_TSC:
6508         /* Disable to prevent the target disabling stuff we need. */
6509         return -TARGET_EINVAL;
6510 
6511     default:
6512         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6513                       option);
6514         return -TARGET_EINVAL;
6515     }
6516 }
6517 
6518 #define NEW_STACK_SIZE 0x40000
6519 
6520 
6521 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6522 typedef struct {
6523     CPUArchState *env;
6524     pthread_mutex_t mutex;
6525     pthread_cond_t cond;
6526     pthread_t thread;
6527     uint32_t tid;
6528     abi_ulong child_tidptr;
6529     abi_ulong parent_tidptr;
6530     sigset_t sigmask;
6531 } new_thread_info;
6532 
clone_func(void * arg)6533 static void *clone_func(void *arg)
6534 {
6535     new_thread_info *info = arg;
6536     CPUArchState *env;
6537     CPUState *cpu;
6538     TaskState *ts;
6539 
6540     rcu_register_thread();
6541     tcg_register_thread();
6542     env = info->env;
6543     cpu = env_cpu(env);
6544     thread_cpu = cpu;
6545     ts = get_task_state(cpu);
6546     info->tid = sys_gettid();
6547     task_settid(ts);
6548     if (info->child_tidptr)
6549         put_user_u32(info->tid, info->child_tidptr);
6550     if (info->parent_tidptr)
6551         put_user_u32(info->tid, info->parent_tidptr);
6552     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6553     /* Enable signals.  */
6554     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6555     /* Signal to the parent that we're ready.  */
6556     pthread_mutex_lock(&info->mutex);
6557     pthread_cond_broadcast(&info->cond);
6558     pthread_mutex_unlock(&info->mutex);
6559     /* Wait until the parent has finished initializing the tls state.  */
6560     pthread_mutex_lock(&clone_lock);
6561     pthread_mutex_unlock(&clone_lock);
6562     cpu_loop(env);
6563     /* never exits */
6564     return NULL;
6565 }
6566 
6567 /* do_fork() Must return host values and target errnos (unlike most
6568    do_*() functions). */
do_fork(CPUArchState * env,unsigned int flags,abi_ulong newsp,abi_ulong parent_tidptr,target_ulong newtls,abi_ulong child_tidptr)6569 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6570                    abi_ulong parent_tidptr, target_ulong newtls,
6571                    abi_ulong child_tidptr)
6572 {
6573     CPUState *cpu = env_cpu(env);
6574     int ret;
6575     TaskState *ts;
6576     CPUState *new_cpu;
6577     CPUArchState *new_env;
6578     sigset_t sigmask;
6579 
6580     flags &= ~CLONE_IGNORED_FLAGS;
6581 
6582     /* Emulate vfork() with fork() */
6583     if (flags & CLONE_VFORK)
6584         flags &= ~(CLONE_VFORK | CLONE_VM);
6585 
6586     if (flags & CLONE_VM) {
6587         TaskState *parent_ts = get_task_state(cpu);
6588         new_thread_info info;
6589         pthread_attr_t attr;
6590 
6591         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6592             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6593             return -TARGET_EINVAL;
6594         }
6595 
6596         ts = g_new0(TaskState, 1);
6597         init_task_state(ts);
6598 
6599         /* Grab a mutex so that thread setup appears atomic.  */
6600         pthread_mutex_lock(&clone_lock);
6601 
6602         /*
6603          * If this is our first additional thread, we need to ensure we
6604          * generate code for parallel execution and flush old translations.
6605          * Do this now so that the copy gets CF_PARALLEL too.
6606          */
6607         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6608             tcg_cflags_set(cpu, CF_PARALLEL);
6609             tb_flush(cpu);
6610         }
6611 
6612         /* we create a new CPU instance. */
6613         new_env = cpu_copy(env);
6614         /* Init regs that differ from the parent.  */
6615         cpu_clone_regs_child(new_env, newsp, flags);
6616         cpu_clone_regs_parent(env, flags);
6617         new_cpu = env_cpu(new_env);
6618         new_cpu->opaque = ts;
6619         ts->bprm = parent_ts->bprm;
6620         ts->info = parent_ts->info;
6621         ts->signal_mask = parent_ts->signal_mask;
6622 
6623         if (flags & CLONE_CHILD_CLEARTID) {
6624             ts->child_tidptr = child_tidptr;
6625         }
6626 
6627         if (flags & CLONE_SETTLS) {
6628             cpu_set_tls (new_env, newtls);
6629         }
6630 
6631         memset(&info, 0, sizeof(info));
6632         pthread_mutex_init(&info.mutex, NULL);
6633         pthread_mutex_lock(&info.mutex);
6634         pthread_cond_init(&info.cond, NULL);
6635         info.env = new_env;
6636         if (flags & CLONE_CHILD_SETTID) {
6637             info.child_tidptr = child_tidptr;
6638         }
6639         if (flags & CLONE_PARENT_SETTID) {
6640             info.parent_tidptr = parent_tidptr;
6641         }
6642 
6643         ret = pthread_attr_init(&attr);
6644         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6645         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6646         /* It is not safe to deliver signals until the child has finished
6647            initializing, so temporarily block all signals.  */
6648         sigfillset(&sigmask);
6649         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6650         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6651 
6652         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6653         /* TODO: Free new CPU state if thread creation failed.  */
6654 
6655         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6656         pthread_attr_destroy(&attr);
6657         if (ret == 0) {
6658             /* Wait for the child to initialize.  */
6659             pthread_cond_wait(&info.cond, &info.mutex);
6660             ret = info.tid;
6661         } else {
6662             ret = -1;
6663         }
6664         pthread_mutex_unlock(&info.mutex);
6665         pthread_cond_destroy(&info.cond);
6666         pthread_mutex_destroy(&info.mutex);
6667         pthread_mutex_unlock(&clone_lock);
6668     } else {
6669         /* if no CLONE_VM, we consider it is a fork */
6670         if (flags & CLONE_INVALID_FORK_FLAGS) {
6671             return -TARGET_EINVAL;
6672         }
6673 
6674         /* We can't support custom termination signals */
6675         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6676             return -TARGET_EINVAL;
6677         }
6678 
6679 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6680         if (flags & CLONE_PIDFD) {
6681             return -TARGET_EINVAL;
6682         }
6683 #endif
6684 
6685         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6686         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6687             return -TARGET_EINVAL;
6688         }
6689 
6690         if (block_signals()) {
6691             return -QEMU_ERESTARTSYS;
6692         }
6693 
6694         fork_start();
6695         ret = fork();
6696         if (ret == 0) {
6697             /* Child Process.  */
6698             cpu_clone_regs_child(env, newsp, flags);
6699             fork_end(ret);
6700             /* There is a race condition here.  The parent process could
6701                theoretically read the TID in the child process before the child
6702                tid is set.  This would require using either ptrace
6703                (not implemented) or having *_tidptr to point at a shared memory
6704                mapping.  We can't repeat the spinlock hack used above because
6705                the child process gets its own copy of the lock.  */
6706             if (flags & CLONE_CHILD_SETTID)
6707                 put_user_u32(sys_gettid(), child_tidptr);
6708             if (flags & CLONE_PARENT_SETTID)
6709                 put_user_u32(sys_gettid(), parent_tidptr);
6710             ts = get_task_state(cpu);
6711             if (flags & CLONE_SETTLS)
6712                 cpu_set_tls (env, newtls);
6713             if (flags & CLONE_CHILD_CLEARTID)
6714                 ts->child_tidptr = child_tidptr;
6715         } else {
6716             cpu_clone_regs_parent(env, flags);
6717             if (flags & CLONE_PIDFD) {
6718                 int pid_fd = 0;
6719 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6720                 int pid_child = ret;
6721                 pid_fd = pidfd_open(pid_child, 0);
6722                 if (pid_fd >= 0) {
6723                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6724                                                | FD_CLOEXEC);
6725                 } else {
6726                         pid_fd = 0;
6727                 }
6728 #endif
6729                 put_user_u32(pid_fd, parent_tidptr);
6730             }
6731             fork_end(ret);
6732         }
6733         g_assert(!cpu_in_exclusive_context(cpu));
6734     }
6735     return ret;
6736 }
6737 
6738 /* warning : doesn't handle linux specific flags... */
target_to_host_fcntl_cmd(int cmd)6739 static int target_to_host_fcntl_cmd(int cmd)
6740 {
6741     int ret;
6742 
6743     switch(cmd) {
6744     case TARGET_F_DUPFD:
6745     case TARGET_F_GETFD:
6746     case TARGET_F_SETFD:
6747     case TARGET_F_GETFL:
6748     case TARGET_F_SETFL:
6749     case TARGET_F_OFD_GETLK:
6750     case TARGET_F_OFD_SETLK:
6751     case TARGET_F_OFD_SETLKW:
6752         ret = cmd;
6753         break;
6754     case TARGET_F_GETLK:
6755         ret = F_GETLK;
6756         break;
6757     case TARGET_F_SETLK:
6758         ret = F_SETLK;
6759         break;
6760     case TARGET_F_SETLKW:
6761         ret = F_SETLKW;
6762         break;
6763     case TARGET_F_GETOWN:
6764         ret = F_GETOWN;
6765         break;
6766     case TARGET_F_SETOWN:
6767         ret = F_SETOWN;
6768         break;
6769     case TARGET_F_GETSIG:
6770         ret = F_GETSIG;
6771         break;
6772     case TARGET_F_SETSIG:
6773         ret = F_SETSIG;
6774         break;
6775 #if TARGET_ABI_BITS == 32
6776     case TARGET_F_GETLK64:
6777         ret = F_GETLK;
6778         break;
6779     case TARGET_F_SETLK64:
6780         ret = F_SETLK;
6781         break;
6782     case TARGET_F_SETLKW64:
6783         ret = F_SETLKW;
6784         break;
6785 #endif
6786     case TARGET_F_SETLEASE:
6787         ret = F_SETLEASE;
6788         break;
6789     case TARGET_F_GETLEASE:
6790         ret = F_GETLEASE;
6791         break;
6792 #ifdef F_DUPFD_CLOEXEC
6793     case TARGET_F_DUPFD_CLOEXEC:
6794         ret = F_DUPFD_CLOEXEC;
6795         break;
6796 #endif
6797     case TARGET_F_NOTIFY:
6798         ret = F_NOTIFY;
6799         break;
6800 #ifdef F_GETOWN_EX
6801     case TARGET_F_GETOWN_EX:
6802         ret = F_GETOWN_EX;
6803         break;
6804 #endif
6805 #ifdef F_SETOWN_EX
6806     case TARGET_F_SETOWN_EX:
6807         ret = F_SETOWN_EX;
6808         break;
6809 #endif
6810 #ifdef F_SETPIPE_SZ
6811     case TARGET_F_SETPIPE_SZ:
6812         ret = F_SETPIPE_SZ;
6813         break;
6814     case TARGET_F_GETPIPE_SZ:
6815         ret = F_GETPIPE_SZ;
6816         break;
6817 #endif
6818 #ifdef F_ADD_SEALS
6819     case TARGET_F_ADD_SEALS:
6820         ret = F_ADD_SEALS;
6821         break;
6822     case TARGET_F_GET_SEALS:
6823         ret = F_GET_SEALS;
6824         break;
6825 #endif
6826     default:
6827         ret = -TARGET_EINVAL;
6828         break;
6829     }
6830 
6831 #if defined(__powerpc64__)
6832     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6833      * is not supported by kernel. The glibc fcntl call actually adjusts
6834      * them to 5, 6 and 7 before making the syscall(). Since we make the
6835      * syscall directly, adjust to what is supported by the kernel.
6836      */
6837     if (ret >= F_GETLK && ret <= F_SETLKW) {
6838         ret -= F_GETLK - 5;
6839     }
6840 #endif
6841 
6842     return ret;
6843 }
6844 
6845 #define FLOCK_TRANSTBL \
6846     switch (type) { \
6847     TRANSTBL_CONVERT(F_RDLCK); \
6848     TRANSTBL_CONVERT(F_WRLCK); \
6849     TRANSTBL_CONVERT(F_UNLCK); \
6850     }
6851 
target_to_host_flock(int type)6852 static int target_to_host_flock(int type)
6853 {
6854 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6855     FLOCK_TRANSTBL
6856 #undef  TRANSTBL_CONVERT
6857     return -TARGET_EINVAL;
6858 }
6859 
host_to_target_flock(int type)6860 static int host_to_target_flock(int type)
6861 {
6862 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6863     FLOCK_TRANSTBL
6864 #undef  TRANSTBL_CONVERT
6865     /* if we don't know how to convert the value coming
6866      * from the host we copy to the target field as-is
6867      */
6868     return type;
6869 }
6870 
copy_from_user_flock(struct flock * fl,abi_ulong target_flock_addr)6871 static inline abi_long copy_from_user_flock(struct flock *fl,
6872                                             abi_ulong target_flock_addr)
6873 {
6874     struct target_flock *target_fl;
6875     int l_type;
6876 
6877     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6878         return -TARGET_EFAULT;
6879     }
6880 
6881     __get_user(l_type, &target_fl->l_type);
6882     l_type = target_to_host_flock(l_type);
6883     if (l_type < 0) {
6884         return l_type;
6885     }
6886     fl->l_type = l_type;
6887     __get_user(fl->l_whence, &target_fl->l_whence);
6888     __get_user(fl->l_start, &target_fl->l_start);
6889     __get_user(fl->l_len, &target_fl->l_len);
6890     __get_user(fl->l_pid, &target_fl->l_pid);
6891     unlock_user_struct(target_fl, target_flock_addr, 0);
6892     return 0;
6893 }
6894 
copy_to_user_flock(abi_ulong target_flock_addr,const struct flock * fl)6895 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6896                                           const struct flock *fl)
6897 {
6898     struct target_flock *target_fl;
6899     short l_type;
6900 
6901     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6902         return -TARGET_EFAULT;
6903     }
6904 
6905     l_type = host_to_target_flock(fl->l_type);
6906     __put_user(l_type, &target_fl->l_type);
6907     __put_user(fl->l_whence, &target_fl->l_whence);
6908     __put_user(fl->l_start, &target_fl->l_start);
6909     __put_user(fl->l_len, &target_fl->l_len);
6910     __put_user(fl->l_pid, &target_fl->l_pid);
6911     unlock_user_struct(target_fl, target_flock_addr, 1);
6912     return 0;
6913 }
6914 
6915 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6916 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6917 
6918 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6919 struct target_oabi_flock64 {
6920     abi_short l_type;
6921     abi_short l_whence;
6922     abi_llong l_start;
6923     abi_llong l_len;
6924     abi_int   l_pid;
6925 } QEMU_PACKED;
6926 
copy_from_user_oabi_flock64(struct flock * fl,abi_ulong target_flock_addr)6927 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6928                                                    abi_ulong target_flock_addr)
6929 {
6930     struct target_oabi_flock64 *target_fl;
6931     int l_type;
6932 
6933     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6934         return -TARGET_EFAULT;
6935     }
6936 
6937     __get_user(l_type, &target_fl->l_type);
6938     l_type = target_to_host_flock(l_type);
6939     if (l_type < 0) {
6940         return l_type;
6941     }
6942     fl->l_type = l_type;
6943     __get_user(fl->l_whence, &target_fl->l_whence);
6944     __get_user(fl->l_start, &target_fl->l_start);
6945     __get_user(fl->l_len, &target_fl->l_len);
6946     __get_user(fl->l_pid, &target_fl->l_pid);
6947     unlock_user_struct(target_fl, target_flock_addr, 0);
6948     return 0;
6949 }
6950 
copy_to_user_oabi_flock64(abi_ulong target_flock_addr,const struct flock * fl)6951 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6952                                                  const struct flock *fl)
6953 {
6954     struct target_oabi_flock64 *target_fl;
6955     short l_type;
6956 
6957     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6958         return -TARGET_EFAULT;
6959     }
6960 
6961     l_type = host_to_target_flock(fl->l_type);
6962     __put_user(l_type, &target_fl->l_type);
6963     __put_user(fl->l_whence, &target_fl->l_whence);
6964     __put_user(fl->l_start, &target_fl->l_start);
6965     __put_user(fl->l_len, &target_fl->l_len);
6966     __put_user(fl->l_pid, &target_fl->l_pid);
6967     unlock_user_struct(target_fl, target_flock_addr, 1);
6968     return 0;
6969 }
6970 #endif
6971 
copy_from_user_flock64(struct flock * fl,abi_ulong target_flock_addr)6972 static inline abi_long copy_from_user_flock64(struct flock *fl,
6973                                               abi_ulong target_flock_addr)
6974 {
6975     struct target_flock64 *target_fl;
6976     int l_type;
6977 
6978     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6979         return -TARGET_EFAULT;
6980     }
6981 
6982     __get_user(l_type, &target_fl->l_type);
6983     l_type = target_to_host_flock(l_type);
6984     if (l_type < 0) {
6985         return l_type;
6986     }
6987     fl->l_type = l_type;
6988     __get_user(fl->l_whence, &target_fl->l_whence);
6989     __get_user(fl->l_start, &target_fl->l_start);
6990     __get_user(fl->l_len, &target_fl->l_len);
6991     __get_user(fl->l_pid, &target_fl->l_pid);
6992     unlock_user_struct(target_fl, target_flock_addr, 0);
6993     return 0;
6994 }
6995 
copy_to_user_flock64(abi_ulong target_flock_addr,const struct flock * fl)6996 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6997                                             const struct flock *fl)
6998 {
6999     struct target_flock64 *target_fl;
7000     short l_type;
7001 
7002     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7003         return -TARGET_EFAULT;
7004     }
7005 
7006     l_type = host_to_target_flock(fl->l_type);
7007     __put_user(l_type, &target_fl->l_type);
7008     __put_user(fl->l_whence, &target_fl->l_whence);
7009     __put_user(fl->l_start, &target_fl->l_start);
7010     __put_user(fl->l_len, &target_fl->l_len);
7011     __put_user(fl->l_pid, &target_fl->l_pid);
7012     unlock_user_struct(target_fl, target_flock_addr, 1);
7013     return 0;
7014 }
7015 
do_fcntl(int fd,int cmd,abi_ulong arg)7016 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7017 {
7018     struct flock fl;
7019 #ifdef F_GETOWN_EX
7020     struct f_owner_ex fox;
7021     struct target_f_owner_ex *target_fox;
7022 #endif
7023     abi_long ret;
7024     int host_cmd = target_to_host_fcntl_cmd(cmd);
7025 
7026     if (host_cmd == -TARGET_EINVAL)
7027 	    return host_cmd;
7028 
7029     switch(cmd) {
7030     case TARGET_F_GETLK:
7031         ret = copy_from_user_flock(&fl, arg);
7032         if (ret) {
7033             return ret;
7034         }
7035         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7036         if (ret == 0) {
7037             ret = copy_to_user_flock(arg, &fl);
7038         }
7039         break;
7040 
7041     case TARGET_F_SETLK:
7042     case TARGET_F_SETLKW:
7043         ret = copy_from_user_flock(&fl, arg);
7044         if (ret) {
7045             return ret;
7046         }
7047         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7048         break;
7049 
7050     case TARGET_F_GETLK64:
7051     case TARGET_F_OFD_GETLK:
7052         ret = copy_from_user_flock64(&fl, arg);
7053         if (ret) {
7054             return ret;
7055         }
7056         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7057         if (ret == 0) {
7058             ret = copy_to_user_flock64(arg, &fl);
7059         }
7060         break;
7061     case TARGET_F_SETLK64:
7062     case TARGET_F_SETLKW64:
7063     case TARGET_F_OFD_SETLK:
7064     case TARGET_F_OFD_SETLKW:
7065         ret = copy_from_user_flock64(&fl, arg);
7066         if (ret) {
7067             return ret;
7068         }
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7070         break;
7071 
7072     case TARGET_F_GETFL:
7073         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7074         if (ret >= 0) {
7075             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7076             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7077             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7078                 ret |= TARGET_O_LARGEFILE;
7079             }
7080         }
7081         break;
7082 
7083     case TARGET_F_SETFL:
7084         ret = get_errno(safe_fcntl(fd, host_cmd,
7085                                    target_to_host_bitmask(arg,
7086                                                           fcntl_flags_tbl)));
7087         break;
7088 
7089 #ifdef F_GETOWN_EX
7090     case TARGET_F_GETOWN_EX:
7091         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7092         if (ret >= 0) {
7093             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7094                 return -TARGET_EFAULT;
7095             target_fox->type = tswap32(fox.type);
7096             target_fox->pid = tswap32(fox.pid);
7097             unlock_user_struct(target_fox, arg, 1);
7098         }
7099         break;
7100 #endif
7101 
7102 #ifdef F_SETOWN_EX
7103     case TARGET_F_SETOWN_EX:
7104         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7105             return -TARGET_EFAULT;
7106         fox.type = tswap32(target_fox->type);
7107         fox.pid = tswap32(target_fox->pid);
7108         unlock_user_struct(target_fox, arg, 0);
7109         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7110         break;
7111 #endif
7112 
7113     case TARGET_F_SETSIG:
7114         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7115         break;
7116 
7117     case TARGET_F_GETSIG:
7118         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7119         break;
7120 
7121     case TARGET_F_SETOWN:
7122     case TARGET_F_GETOWN:
7123     case TARGET_F_SETLEASE:
7124     case TARGET_F_GETLEASE:
7125     case TARGET_F_SETPIPE_SZ:
7126     case TARGET_F_GETPIPE_SZ:
7127     case TARGET_F_ADD_SEALS:
7128     case TARGET_F_GET_SEALS:
7129         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7130         break;
7131 
7132     default:
7133         ret = get_errno(safe_fcntl(fd, cmd, arg));
7134         break;
7135     }
7136     return ret;
7137 }
7138 
7139 #ifdef USE_UID16
7140 
high2lowuid(int uid)7141 static inline int high2lowuid(int uid)
7142 {
7143     if (uid > 65535)
7144         return 65534;
7145     else
7146         return uid;
7147 }
7148 
high2lowgid(int gid)7149 static inline int high2lowgid(int gid)
7150 {
7151     if (gid > 65535)
7152         return 65534;
7153     else
7154         return gid;
7155 }
7156 
low2highuid(int uid)7157 static inline int low2highuid(int uid)
7158 {
7159     if ((int16_t)uid == -1)
7160         return -1;
7161     else
7162         return uid;
7163 }
7164 
low2highgid(int gid)7165 static inline int low2highgid(int gid)
7166 {
7167     if ((int16_t)gid == -1)
7168         return -1;
7169     else
7170         return gid;
7171 }
tswapid(int id)7172 static inline int tswapid(int id)
7173 {
7174     return tswap16(id);
7175 }
7176 
7177 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7178 
7179 #else /* !USE_UID16 */
high2lowuid(int uid)7180 static inline int high2lowuid(int uid)
7181 {
7182     return uid;
7183 }
high2lowgid(int gid)7184 static inline int high2lowgid(int gid)
7185 {
7186     return gid;
7187 }
low2highuid(int uid)7188 static inline int low2highuid(int uid)
7189 {
7190     return uid;
7191 }
low2highgid(int gid)7192 static inline int low2highgid(int gid)
7193 {
7194     return gid;
7195 }
tswapid(int id)7196 static inline int tswapid(int id)
7197 {
7198     return tswap32(id);
7199 }
7200 
7201 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7202 
7203 #endif /* USE_UID16 */
7204 
7205 /* We must do direct syscalls for setting UID/GID, because we want to
7206  * implement the Linux system call semantics of "change only for this thread",
7207  * not the libc/POSIX semantics of "change for all threads in process".
7208  * (See http://ewontfix.com/17/ for more details.)
7209  * We use the 32-bit version of the syscalls if present; if it is not
7210  * then either the host architecture supports 32-bit UIDs natively with
7211  * the standard syscall, or the 16-bit UID is the best we can do.
7212  */
7213 #ifdef __NR_setuid32
7214 #define __NR_sys_setuid __NR_setuid32
7215 #else
7216 #define __NR_sys_setuid __NR_setuid
7217 #endif
7218 #ifdef __NR_setgid32
7219 #define __NR_sys_setgid __NR_setgid32
7220 #else
7221 #define __NR_sys_setgid __NR_setgid
7222 #endif
7223 #ifdef __NR_setresuid32
7224 #define __NR_sys_setresuid __NR_setresuid32
7225 #else
7226 #define __NR_sys_setresuid __NR_setresuid
7227 #endif
7228 #ifdef __NR_setresgid32
7229 #define __NR_sys_setresgid __NR_setresgid32
7230 #else
7231 #define __NR_sys_setresgid __NR_setresgid
7232 #endif
7233 #ifdef __NR_setgroups32
7234 #define __NR_sys_setgroups __NR_setgroups32
7235 #else
7236 #define __NR_sys_setgroups __NR_setgroups
7237 #endif
7238 #ifdef __NR_sys_setreuid32
7239 #define __NR_sys_setreuid __NR_setreuid32
7240 #else
7241 #define __NR_sys_setreuid __NR_setreuid
7242 #endif
7243 #ifdef __NR_sys_setregid32
7244 #define __NR_sys_setregid __NR_setregid32
7245 #else
7246 #define __NR_sys_setregid __NR_setregid
7247 #endif
7248 
7249 _syscall1(int, sys_setuid, uid_t, uid)
7250 _syscall1(int, sys_setgid, gid_t, gid)
7251 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7252 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7253 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7254 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7255 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7256 
syscall_init(void)7257 void syscall_init(void)
7258 {
7259     IOCTLEntry *ie;
7260     const argtype *arg_type;
7261     int size;
7262 
7263     thunk_init(STRUCT_MAX);
7264 
7265 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7266 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7267 #include "syscall_types.h"
7268 #undef STRUCT
7269 #undef STRUCT_SPECIAL
7270 
7271     /* we patch the ioctl size if necessary. We rely on the fact that
7272        no ioctl has all the bits at '1' in the size field */
7273     ie = ioctl_entries;
7274     while (ie->target_cmd != 0) {
7275         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7276             TARGET_IOC_SIZEMASK) {
7277             arg_type = ie->arg_type;
7278             if (arg_type[0] != TYPE_PTR) {
7279                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7280                         ie->target_cmd);
7281                 exit(1);
7282             }
7283             arg_type++;
7284             size = thunk_type_size(arg_type, 0);
7285             ie->target_cmd = (ie->target_cmd &
7286                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7287                 (size << TARGET_IOC_SIZESHIFT);
7288         }
7289 
7290         /* automatic consistency check if same arch */
7291 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7292     (defined(__x86_64__) && defined(TARGET_X86_64))
7293         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7294             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7295                     ie->name, ie->target_cmd, ie->host_cmd);
7296         }
7297 #endif
7298         ie++;
7299     }
7300 }
7301 
7302 #ifdef TARGET_NR_truncate64
target_truncate64(CPUArchState * cpu_env,const char * arg1,abi_long arg2,abi_long arg3,abi_long arg4)7303 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7304                                          abi_long arg2,
7305                                          abi_long arg3,
7306                                          abi_long arg4)
7307 {
7308     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7309         arg2 = arg3;
7310         arg3 = arg4;
7311     }
7312     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315 
7316 #ifdef TARGET_NR_ftruncate64
target_ftruncate64(CPUArchState * cpu_env,abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4)7317 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7318                                           abi_long arg2,
7319                                           abi_long arg3,
7320                                           abi_long arg4)
7321 {
7322     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7323         arg2 = arg3;
7324         arg3 = arg4;
7325     }
7326     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7327 }
7328 #endif
7329 
7330 #if defined(TARGET_NR_timer_settime) || \
7331     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
target_to_host_itimerspec(struct itimerspec * host_its,abi_ulong target_addr)7332 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7333                                                  abi_ulong target_addr)
7334 {
7335     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7336                                 offsetof(struct target_itimerspec,
7337                                          it_interval)) ||
7338         target_to_host_timespec(&host_its->it_value, target_addr +
7339                                 offsetof(struct target_itimerspec,
7340                                          it_value))) {
7341         return -TARGET_EFAULT;
7342     }
7343 
7344     return 0;
7345 }
7346 #endif
7347 
7348 #if defined(TARGET_NR_timer_settime64) || \
7349     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
target_to_host_itimerspec64(struct itimerspec * host_its,abi_ulong target_addr)7350 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7351                                                    abi_ulong target_addr)
7352 {
7353     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7354                                   offsetof(struct target__kernel_itimerspec,
7355                                            it_interval)) ||
7356         target_to_host_timespec64(&host_its->it_value, target_addr +
7357                                   offsetof(struct target__kernel_itimerspec,
7358                                            it_value))) {
7359         return -TARGET_EFAULT;
7360     }
7361 
7362     return 0;
7363 }
7364 #endif
7365 
7366 #if ((defined(TARGET_NR_timerfd_gettime) || \
7367       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7368       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
host_to_target_itimerspec(abi_ulong target_addr,struct itimerspec * host_its)7369 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7370                                                  struct itimerspec *host_its)
7371 {
7372     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7373                                                        it_interval),
7374                                 &host_its->it_interval) ||
7375         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7376                                                        it_value),
7377                                 &host_its->it_value)) {
7378         return -TARGET_EFAULT;
7379     }
7380     return 0;
7381 }
7382 #endif
7383 
7384 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7385       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7386       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
host_to_target_itimerspec64(abi_ulong target_addr,struct itimerspec * host_its)7387 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7388                                                    struct itimerspec *host_its)
7389 {
7390     if (host_to_target_timespec64(target_addr +
7391                                   offsetof(struct target__kernel_itimerspec,
7392                                            it_interval),
7393                                   &host_its->it_interval) ||
7394         host_to_target_timespec64(target_addr +
7395                                   offsetof(struct target__kernel_itimerspec,
7396                                            it_value),
7397                                   &host_its->it_value)) {
7398         return -TARGET_EFAULT;
7399     }
7400     return 0;
7401 }
7402 #endif
7403 
7404 #if defined(TARGET_NR_adjtimex) || \
7405     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
target_to_host_timex(struct timex * host_tx,abi_long target_addr)7406 static inline abi_long target_to_host_timex(struct timex *host_tx,
7407                                             abi_long target_addr)
7408 {
7409     struct target_timex *target_tx;
7410 
7411     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7412         return -TARGET_EFAULT;
7413     }
7414 
7415     __get_user(host_tx->modes, &target_tx->modes);
7416     __get_user(host_tx->offset, &target_tx->offset);
7417     __get_user(host_tx->freq, &target_tx->freq);
7418     __get_user(host_tx->maxerror, &target_tx->maxerror);
7419     __get_user(host_tx->esterror, &target_tx->esterror);
7420     __get_user(host_tx->status, &target_tx->status);
7421     __get_user(host_tx->constant, &target_tx->constant);
7422     __get_user(host_tx->precision, &target_tx->precision);
7423     __get_user(host_tx->tolerance, &target_tx->tolerance);
7424     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7425     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7426     __get_user(host_tx->tick, &target_tx->tick);
7427     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7428     __get_user(host_tx->jitter, &target_tx->jitter);
7429     __get_user(host_tx->shift, &target_tx->shift);
7430     __get_user(host_tx->stabil, &target_tx->stabil);
7431     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7432     __get_user(host_tx->calcnt, &target_tx->calcnt);
7433     __get_user(host_tx->errcnt, &target_tx->errcnt);
7434     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7435     __get_user(host_tx->tai, &target_tx->tai);
7436 
7437     unlock_user_struct(target_tx, target_addr, 0);
7438     return 0;
7439 }
7440 
host_to_target_timex(abi_long target_addr,struct timex * host_tx)7441 static inline abi_long host_to_target_timex(abi_long target_addr,
7442                                             struct timex *host_tx)
7443 {
7444     struct target_timex *target_tx;
7445 
7446     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     __put_user(host_tx->modes, &target_tx->modes);
7451     __put_user(host_tx->offset, &target_tx->offset);
7452     __put_user(host_tx->freq, &target_tx->freq);
7453     __put_user(host_tx->maxerror, &target_tx->maxerror);
7454     __put_user(host_tx->esterror, &target_tx->esterror);
7455     __put_user(host_tx->status, &target_tx->status);
7456     __put_user(host_tx->constant, &target_tx->constant);
7457     __put_user(host_tx->precision, &target_tx->precision);
7458     __put_user(host_tx->tolerance, &target_tx->tolerance);
7459     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7460     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7461     __put_user(host_tx->tick, &target_tx->tick);
7462     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7463     __put_user(host_tx->jitter, &target_tx->jitter);
7464     __put_user(host_tx->shift, &target_tx->shift);
7465     __put_user(host_tx->stabil, &target_tx->stabil);
7466     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7467     __put_user(host_tx->calcnt, &target_tx->calcnt);
7468     __put_user(host_tx->errcnt, &target_tx->errcnt);
7469     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7470     __put_user(host_tx->tai, &target_tx->tai);
7471 
7472     unlock_user_struct(target_tx, target_addr, 1);
7473     return 0;
7474 }
7475 #endif
7476 
7477 
7478 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
target_to_host_timex64(struct timex * host_tx,abi_long target_addr)7479 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7480                                               abi_long target_addr)
7481 {
7482     struct target__kernel_timex *target_tx;
7483 
7484     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7485                                  offsetof(struct target__kernel_timex,
7486                                           time))) {
7487         return -TARGET_EFAULT;
7488     }
7489 
7490     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7491         return -TARGET_EFAULT;
7492     }
7493 
7494     __get_user(host_tx->modes, &target_tx->modes);
7495     __get_user(host_tx->offset, &target_tx->offset);
7496     __get_user(host_tx->freq, &target_tx->freq);
7497     __get_user(host_tx->maxerror, &target_tx->maxerror);
7498     __get_user(host_tx->esterror, &target_tx->esterror);
7499     __get_user(host_tx->status, &target_tx->status);
7500     __get_user(host_tx->constant, &target_tx->constant);
7501     __get_user(host_tx->precision, &target_tx->precision);
7502     __get_user(host_tx->tolerance, &target_tx->tolerance);
7503     __get_user(host_tx->tick, &target_tx->tick);
7504     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7505     __get_user(host_tx->jitter, &target_tx->jitter);
7506     __get_user(host_tx->shift, &target_tx->shift);
7507     __get_user(host_tx->stabil, &target_tx->stabil);
7508     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7509     __get_user(host_tx->calcnt, &target_tx->calcnt);
7510     __get_user(host_tx->errcnt, &target_tx->errcnt);
7511     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7512     __get_user(host_tx->tai, &target_tx->tai);
7513 
7514     unlock_user_struct(target_tx, target_addr, 0);
7515     return 0;
7516 }
7517 
host_to_target_timex64(abi_long target_addr,struct timex * host_tx)7518 static inline abi_long host_to_target_timex64(abi_long target_addr,
7519                                               struct timex *host_tx)
7520 {
7521     struct target__kernel_timex *target_tx;
7522 
7523    if (copy_to_user_timeval64(target_addr +
7524                               offsetof(struct target__kernel_timex, time),
7525                               &host_tx->time)) {
7526         return -TARGET_EFAULT;
7527     }
7528 
7529     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7530         return -TARGET_EFAULT;
7531     }
7532 
7533     __put_user(host_tx->modes, &target_tx->modes);
7534     __put_user(host_tx->offset, &target_tx->offset);
7535     __put_user(host_tx->freq, &target_tx->freq);
7536     __put_user(host_tx->maxerror, &target_tx->maxerror);
7537     __put_user(host_tx->esterror, &target_tx->esterror);
7538     __put_user(host_tx->status, &target_tx->status);
7539     __put_user(host_tx->constant, &target_tx->constant);
7540     __put_user(host_tx->precision, &target_tx->precision);
7541     __put_user(host_tx->tolerance, &target_tx->tolerance);
7542     __put_user(host_tx->tick, &target_tx->tick);
7543     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7544     __put_user(host_tx->jitter, &target_tx->jitter);
7545     __put_user(host_tx->shift, &target_tx->shift);
7546     __put_user(host_tx->stabil, &target_tx->stabil);
7547     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7548     __put_user(host_tx->calcnt, &target_tx->calcnt);
7549     __put_user(host_tx->errcnt, &target_tx->errcnt);
7550     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7551     __put_user(host_tx->tai, &target_tx->tai);
7552 
7553     unlock_user_struct(target_tx, target_addr, 1);
7554     return 0;
7555 }
7556 #endif
7557 
7558 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7559 #define sigev_notify_thread_id _sigev_un._tid
7560 #endif
7561 
target_to_host_sigevent(struct sigevent * host_sevp,abi_ulong target_addr)7562 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7563                                                abi_ulong target_addr)
7564 {
7565     struct target_sigevent *target_sevp;
7566 
7567     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7568         return -TARGET_EFAULT;
7569     }
7570 
7571     /* This union is awkward on 64 bit systems because it has a 32 bit
7572      * integer and a pointer in it; we follow the conversion approach
7573      * used for handling sigval types in signal.c so the guest should get
7574      * the correct value back even if we did a 64 bit byteswap and it's
7575      * using the 32 bit integer.
7576      */
7577     host_sevp->sigev_value.sival_ptr =
7578         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7579     host_sevp->sigev_signo =
7580         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7581     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7582     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7583 
7584     unlock_user_struct(target_sevp, target_addr, 1);
7585     return 0;
7586 }
7587 
7588 #if defined(TARGET_NR_mlockall)
target_to_host_mlockall_arg(int arg)7589 static inline int target_to_host_mlockall_arg(int arg)
7590 {
7591     int result = 0;
7592 
7593     if (arg & TARGET_MCL_CURRENT) {
7594         result |= MCL_CURRENT;
7595     }
7596     if (arg & TARGET_MCL_FUTURE) {
7597         result |= MCL_FUTURE;
7598     }
7599 #ifdef MCL_ONFAULT
7600     if (arg & TARGET_MCL_ONFAULT) {
7601         result |= MCL_ONFAULT;
7602     }
7603 #endif
7604 
7605     return result;
7606 }
7607 #endif
7608 
target_to_host_msync_arg(abi_long arg)7609 static inline int target_to_host_msync_arg(abi_long arg)
7610 {
7611     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7612            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7613            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7614            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7615 }
7616 
7617 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7618      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7619      defined(TARGET_NR_newfstatat))
host_to_target_stat64(CPUArchState * cpu_env,abi_ulong target_addr,struct stat * host_st)7620 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7621                                              abi_ulong target_addr,
7622                                              struct stat *host_st)
7623 {
7624 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7625     if (cpu_env->eabi) {
7626         struct target_eabi_stat64 *target_st;
7627 
7628         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7629             return -TARGET_EFAULT;
7630         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7631         __put_user(host_st->st_dev, &target_st->st_dev);
7632         __put_user(host_st->st_ino, &target_st->st_ino);
7633 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7634         __put_user(host_st->st_ino, &target_st->__st_ino);
7635 #endif
7636         __put_user(host_st->st_mode, &target_st->st_mode);
7637         __put_user(host_st->st_nlink, &target_st->st_nlink);
7638         __put_user(host_st->st_uid, &target_st->st_uid);
7639         __put_user(host_st->st_gid, &target_st->st_gid);
7640         __put_user(host_st->st_rdev, &target_st->st_rdev);
7641         __put_user(host_st->st_size, &target_st->st_size);
7642         __put_user(host_st->st_blksize, &target_st->st_blksize);
7643         __put_user(host_st->st_blocks, &target_st->st_blocks);
7644         __put_user(host_st->st_atime, &target_st->target_st_atime);
7645         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7646         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7647 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7648         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7649         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7650         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7651 #endif
7652         unlock_user_struct(target_st, target_addr, 1);
7653     } else
7654 #endif
7655     {
7656 #if defined(TARGET_HAS_STRUCT_STAT64)
7657         struct target_stat64 *target_st;
7658 #else
7659         struct target_stat *target_st;
7660 #endif
7661 
7662         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7663             return -TARGET_EFAULT;
7664         memset(target_st, 0, sizeof(*target_st));
7665         __put_user(host_st->st_dev, &target_st->st_dev);
7666         __put_user(host_st->st_ino, &target_st->st_ino);
7667 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7668         __put_user(host_st->st_ino, &target_st->__st_ino);
7669 #endif
7670         __put_user(host_st->st_mode, &target_st->st_mode);
7671         __put_user(host_st->st_nlink, &target_st->st_nlink);
7672         __put_user(host_st->st_uid, &target_st->st_uid);
7673         __put_user(host_st->st_gid, &target_st->st_gid);
7674         __put_user(host_st->st_rdev, &target_st->st_rdev);
7675         /* XXX: better use of kernel struct */
7676         __put_user(host_st->st_size, &target_st->st_size);
7677         __put_user(host_st->st_blksize, &target_st->st_blksize);
7678         __put_user(host_st->st_blocks, &target_st->st_blocks);
7679         __put_user(host_st->st_atime, &target_st->target_st_atime);
7680         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7681         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7682 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7683         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7684         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7685         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7686 #endif
7687         unlock_user_struct(target_st, target_addr, 1);
7688     }
7689 
7690     return 0;
7691 }
7692 #endif
7693 
7694 #if defined(TARGET_NR_statx) && defined(__NR_statx)
host_to_target_statx(struct target_statx * host_stx,abi_ulong target_addr)7695 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7696                                             abi_ulong target_addr)
7697 {
7698     struct target_statx *target_stx;
7699 
7700     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7701         return -TARGET_EFAULT;
7702     }
7703     memset(target_stx, 0, sizeof(*target_stx));
7704 
7705     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7706     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7707     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7708     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7709     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7710     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7711     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7712     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7713     __put_user(host_stx->stx_size, &target_stx->stx_size);
7714     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7715     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7716     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7717     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7718     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7719     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7720     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7721     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7722     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7723     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7724     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7725     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7726     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7727     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7728 
7729     unlock_user_struct(target_stx, target_addr, 1);
7730 
7731     return 0;
7732 }
7733 #endif
7734 
do_sys_futex(int * uaddr,int op,int val,const struct timespec * timeout,int * uaddr2,int val3)7735 static int do_sys_futex(int *uaddr, int op, int val,
7736                          const struct timespec *timeout, int *uaddr2,
7737                          int val3)
7738 {
7739 #if HOST_LONG_BITS == 64
7740 #if defined(__NR_futex)
7741     /* always a 64-bit time_t, it doesn't define _time64 version  */
7742     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7743 
7744 #endif
7745 #else /* HOST_LONG_BITS == 64 */
7746 #if defined(__NR_futex_time64)
7747     if (sizeof(timeout->tv_sec) == 8) {
7748         /* _time64 function on 32bit arch */
7749         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7750     }
7751 #endif
7752 #if defined(__NR_futex)
7753     /* old function on 32bit arch */
7754     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7755 #endif
7756 #endif /* HOST_LONG_BITS == 64 */
7757     g_assert_not_reached();
7758 }
7759 
do_safe_futex(int * uaddr,int op,int val,const struct timespec * timeout,int * uaddr2,int val3)7760 static int do_safe_futex(int *uaddr, int op, int val,
7761                          const struct timespec *timeout, int *uaddr2,
7762                          int val3)
7763 {
7764 #if HOST_LONG_BITS == 64
7765 #if defined(__NR_futex)
7766     /* always a 64-bit time_t, it doesn't define _time64 version  */
7767     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7768 #endif
7769 #else /* HOST_LONG_BITS == 64 */
7770 #if defined(__NR_futex_time64)
7771     if (sizeof(timeout->tv_sec) == 8) {
7772         /* _time64 function on 32bit arch */
7773         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7774                                            val3));
7775     }
7776 #endif
7777 #if defined(__NR_futex)
7778     /* old function on 32bit arch */
7779     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7780 #endif
7781 #endif /* HOST_LONG_BITS == 64 */
7782     return -TARGET_ENOSYS;
7783 }
7784 
7785 /* ??? Using host futex calls even when target atomic operations
7786    are not really atomic probably breaks things.  However implementing
7787    futexes locally would make futexes shared between multiple processes
7788    tricky.  However they're probably useless because guest atomic
7789    operations won't work either.  */
7790 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
do_futex(CPUState * cpu,bool time64,target_ulong uaddr,int op,int val,target_ulong timeout,target_ulong uaddr2,int val3)7791 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7792                     int op, int val, target_ulong timeout,
7793                     target_ulong uaddr2, int val3)
7794 {
7795     struct timespec ts, *pts = NULL;
7796     void *haddr2 = NULL;
7797     int base_op;
7798 
7799     /* We assume FUTEX_* constants are the same on both host and target. */
7800 #ifdef FUTEX_CMD_MASK
7801     base_op = op & FUTEX_CMD_MASK;
7802 #else
7803     base_op = op;
7804 #endif
7805     switch (base_op) {
7806     case FUTEX_WAIT:
7807     case FUTEX_WAIT_BITSET:
7808         val = tswap32(val);
7809         break;
7810     case FUTEX_WAIT_REQUEUE_PI:
7811         val = tswap32(val);
7812         haddr2 = g2h(cpu, uaddr2);
7813         break;
7814     case FUTEX_LOCK_PI:
7815     case FUTEX_LOCK_PI2:
7816         break;
7817     case FUTEX_WAKE:
7818     case FUTEX_WAKE_BITSET:
7819     case FUTEX_TRYLOCK_PI:
7820     case FUTEX_UNLOCK_PI:
7821         timeout = 0;
7822         break;
7823     case FUTEX_FD:
7824         val = target_to_host_signal(val);
7825         timeout = 0;
7826         break;
7827     case FUTEX_CMP_REQUEUE:
7828     case FUTEX_CMP_REQUEUE_PI:
7829         val3 = tswap32(val3);
7830         /* fall through */
7831     case FUTEX_REQUEUE:
7832     case FUTEX_WAKE_OP:
7833         /*
7834          * For these, the 4th argument is not TIMEOUT, but VAL2.
7835          * But the prototype of do_safe_futex takes a pointer, so
7836          * insert casts to satisfy the compiler.  We do not need
7837          * to tswap VAL2 since it's not compared to guest memory.
7838           */
7839         pts = (struct timespec *)(uintptr_t)timeout;
7840         timeout = 0;
7841         haddr2 = g2h(cpu, uaddr2);
7842         break;
7843     default:
7844         return -TARGET_ENOSYS;
7845     }
7846     if (timeout) {
7847         pts = &ts;
7848         if (time64
7849             ? target_to_host_timespec64(pts, timeout)
7850             : target_to_host_timespec(pts, timeout)) {
7851             return -TARGET_EFAULT;
7852         }
7853     }
7854     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7855 }
7856 #endif
7857 
7858 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
do_name_to_handle_at(abi_long dirfd,abi_long pathname,abi_long handle,abi_long mount_id,abi_long flags)7859 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7860                                      abi_long handle, abi_long mount_id,
7861                                      abi_long flags)
7862 {
7863     struct file_handle *target_fh;
7864     struct file_handle *fh;
7865     int mid = 0;
7866     abi_long ret;
7867     char *name;
7868     unsigned int size, total_size;
7869 
7870     if (get_user_s32(size, handle)) {
7871         return -TARGET_EFAULT;
7872     }
7873 
7874     name = lock_user_string(pathname);
7875     if (!name) {
7876         return -TARGET_EFAULT;
7877     }
7878 
7879     total_size = sizeof(struct file_handle) + size;
7880     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7881     if (!target_fh) {
7882         unlock_user(name, pathname, 0);
7883         return -TARGET_EFAULT;
7884     }
7885 
7886     fh = g_malloc0(total_size);
7887     fh->handle_bytes = size;
7888 
7889     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7890     unlock_user(name, pathname, 0);
7891 
7892     /* man name_to_handle_at(2):
7893      * Other than the use of the handle_bytes field, the caller should treat
7894      * the file_handle structure as an opaque data type
7895      */
7896 
7897     memcpy(target_fh, fh, total_size);
7898     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7899     target_fh->handle_type = tswap32(fh->handle_type);
7900     g_free(fh);
7901     unlock_user(target_fh, handle, total_size);
7902 
7903     if (put_user_s32(mid, mount_id)) {
7904         return -TARGET_EFAULT;
7905     }
7906 
7907     return ret;
7908 
7909 }
7910 #endif
7911 
7912 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
do_open_by_handle_at(abi_long mount_fd,abi_long handle,abi_long flags)7913 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7914                                      abi_long flags)
7915 {
7916     struct file_handle *target_fh;
7917     struct file_handle *fh;
7918     unsigned int size, total_size;
7919     abi_long ret;
7920 
7921     if (get_user_s32(size, handle)) {
7922         return -TARGET_EFAULT;
7923     }
7924 
7925     total_size = sizeof(struct file_handle) + size;
7926     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7927     if (!target_fh) {
7928         return -TARGET_EFAULT;
7929     }
7930 
7931     fh = g_memdup(target_fh, total_size);
7932     fh->handle_bytes = size;
7933     fh->handle_type = tswap32(target_fh->handle_type);
7934 
7935     ret = get_errno(open_by_handle_at(mount_fd, fh,
7936                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7937 
7938     g_free(fh);
7939 
7940     unlock_user(target_fh, handle, total_size);
7941 
7942     return ret;
7943 }
7944 #endif
7945 
7946 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7947 
do_signalfd4(int fd,abi_long mask,int flags)7948 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7949 {
7950     int host_flags;
7951     target_sigset_t *target_mask;
7952     sigset_t host_mask;
7953     abi_long ret;
7954 
7955     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7956         return -TARGET_EINVAL;
7957     }
7958     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7959         return -TARGET_EFAULT;
7960     }
7961 
7962     target_to_host_sigset(&host_mask, target_mask);
7963 
7964     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7965 
7966     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7967     if (ret >= 0) {
7968         fd_trans_register(ret, &target_signalfd_trans);
7969     }
7970 
7971     unlock_user_struct(target_mask, mask, 0);
7972 
7973     return ret;
7974 }
7975 #endif
7976 
7977 /* Map host to target signal numbers for the wait family of syscalls.
7978    Assume all other status bits are the same.  */
host_to_target_waitstatus(int status)7979 int host_to_target_waitstatus(int status)
7980 {
7981     if (WIFSIGNALED(status)) {
7982         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7983     }
7984     if (WIFSTOPPED(status)) {
7985         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7986                | (status & 0xff);
7987     }
7988     return status;
7989 }
7990 
open_self_cmdline(CPUArchState * cpu_env,int fd)7991 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7992 {
7993     CPUState *cpu = env_cpu(cpu_env);
7994     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7995     int i;
7996 
7997     for (i = 0; i < bprm->argc; i++) {
7998         size_t len = strlen(bprm->argv[i]) + 1;
7999 
8000         if (write(fd, bprm->argv[i], len) != len) {
8001             return -1;
8002         }
8003     }
8004 
8005     return 0;
8006 }
8007 
8008 struct open_self_maps_data {
8009     TaskState *ts;
8010     IntervalTreeRoot *host_maps;
8011     int fd;
8012     bool smaps;
8013 };
8014 
8015 /*
8016  * Subroutine to output one line of /proc/self/maps,
8017  * or one region of /proc/self/smaps.
8018  */
8019 
8020 #ifdef TARGET_HPPA
8021 # define test_stack(S, E, L)  (E == L)
8022 #else
8023 # define test_stack(S, E, L)  (S == L)
8024 #endif
8025 
open_self_maps_4(const struct open_self_maps_data * d,const MapInfo * mi,abi_ptr start,abi_ptr end,unsigned flags)8026 static void open_self_maps_4(const struct open_self_maps_data *d,
8027                              const MapInfo *mi, abi_ptr start,
8028                              abi_ptr end, unsigned flags)
8029 {
8030     const struct image_info *info = d->ts->info;
8031     const char *path = mi->path;
8032     uint64_t offset;
8033     int fd = d->fd;
8034     int count;
8035 
8036     if (test_stack(start, end, info->stack_limit)) {
8037         path = "[stack]";
8038     } else if (start == info->brk) {
8039         path = "[heap]";
8040     } else if (start == info->vdso) {
8041         path = "[vdso]";
8042 #ifdef TARGET_X86_64
8043     } else if (start == TARGET_VSYSCALL_PAGE) {
8044         path = "[vsyscall]";
8045 #endif
8046     }
8047 
8048     /* Except null device (MAP_ANON), adjust offset for this fragment. */
8049     offset = mi->offset;
8050     if (mi->dev) {
8051         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8052         offset += hstart - mi->itree.start;
8053     }
8054 
8055     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8056                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8057                     start, end,
8058                     (flags & PAGE_READ) ? 'r' : '-',
8059                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8060                     (flags & PAGE_EXEC) ? 'x' : '-',
8061                     mi->is_priv ? 'p' : 's',
8062                     offset, major(mi->dev), minor(mi->dev),
8063                     (uint64_t)mi->inode);
8064     if (path) {
8065         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8066     } else {
8067         dprintf(fd, "\n");
8068     }
8069 
8070     if (d->smaps) {
8071         unsigned long size = end - start;
8072         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8073         unsigned long size_kb = size >> 10;
8074 
8075         dprintf(fd, "Size:                  %lu kB\n"
8076                 "KernelPageSize:        %lu kB\n"
8077                 "MMUPageSize:           %lu kB\n"
8078                 "Rss:                   0 kB\n"
8079                 "Pss:                   0 kB\n"
8080                 "Pss_Dirty:             0 kB\n"
8081                 "Shared_Clean:          0 kB\n"
8082                 "Shared_Dirty:          0 kB\n"
8083                 "Private_Clean:         0 kB\n"
8084                 "Private_Dirty:         0 kB\n"
8085                 "Referenced:            0 kB\n"
8086                 "Anonymous:             %lu kB\n"
8087                 "LazyFree:              0 kB\n"
8088                 "AnonHugePages:         0 kB\n"
8089                 "ShmemPmdMapped:        0 kB\n"
8090                 "FilePmdMapped:         0 kB\n"
8091                 "Shared_Hugetlb:        0 kB\n"
8092                 "Private_Hugetlb:       0 kB\n"
8093                 "Swap:                  0 kB\n"
8094                 "SwapPss:               0 kB\n"
8095                 "Locked:                0 kB\n"
8096                 "THPeligible:    0\n"
8097                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8098                 size_kb, page_size_kb, page_size_kb,
8099                 (flags & PAGE_ANON ? size_kb : 0),
8100                 (flags & PAGE_READ) ? " rd" : "",
8101                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8102                 (flags & PAGE_EXEC) ? " ex" : "",
8103                 mi->is_priv ? "" : " sh",
8104                 (flags & PAGE_READ) ? " mr" : "",
8105                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8106                 (flags & PAGE_EXEC) ? " me" : "",
8107                 mi->is_priv ? "" : " ms");
8108     }
8109 }
8110 
8111 /*
8112  * Callback for walk_memory_regions, when read_self_maps() fails.
8113  * Proceed without the benefit of host /proc/self/maps cross-check.
8114  */
open_self_maps_3(void * opaque,target_ulong guest_start,target_ulong guest_end,unsigned long flags)8115 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8116                             target_ulong guest_end, unsigned long flags)
8117 {
8118     static const MapInfo mi = { .is_priv = true };
8119 
8120     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8121     return 0;
8122 }
8123 
8124 /*
8125  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8126  */
open_self_maps_2(void * opaque,target_ulong guest_start,target_ulong guest_end,unsigned long flags)8127 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8128                             target_ulong guest_end, unsigned long flags)
8129 {
8130     const struct open_self_maps_data *d = opaque;
8131     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8132     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8133 
8134 #ifdef TARGET_X86_64
8135     /*
8136      * Because of the extremely high position of the page within the guest
8137      * virtual address space, this is not backed by host memory at all.
8138      * Therefore the loop below would fail.  This is the only instance
8139      * of not having host backing memory.
8140      */
8141     if (guest_start == TARGET_VSYSCALL_PAGE) {
8142         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8143     }
8144 #endif
8145 
8146     while (1) {
8147         IntervalTreeNode *n =
8148             interval_tree_iter_first(d->host_maps, host_start, host_start);
8149         MapInfo *mi = container_of(n, MapInfo, itree);
8150         uintptr_t this_hlast = MIN(host_last, n->last);
8151         target_ulong this_gend = h2g(this_hlast) + 1;
8152 
8153         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8154 
8155         if (this_hlast == host_last) {
8156             return 0;
8157         }
8158         host_start = this_hlast + 1;
8159         guest_start = h2g(host_start);
8160     }
8161 }
8162 
open_self_maps_1(CPUArchState * env,int fd,bool smaps)8163 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8164 {
8165     struct open_self_maps_data d = {
8166         .ts = get_task_state(env_cpu(env)),
8167         .fd = fd,
8168         .smaps = smaps
8169     };
8170 
8171     mmap_lock();
8172     d.host_maps = read_self_maps();
8173     if (d.host_maps) {
8174         walk_memory_regions(&d, open_self_maps_2);
8175         free_self_maps(d.host_maps);
8176     } else {
8177         walk_memory_regions(&d, open_self_maps_3);
8178     }
8179     mmap_unlock();
8180     return 0;
8181 }
8182 
open_self_maps(CPUArchState * cpu_env,int fd)8183 static int open_self_maps(CPUArchState *cpu_env, int fd)
8184 {
8185     return open_self_maps_1(cpu_env, fd, false);
8186 }
8187 
open_self_smaps(CPUArchState * cpu_env,int fd)8188 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8189 {
8190     return open_self_maps_1(cpu_env, fd, true);
8191 }
8192 
open_self_stat(CPUArchState * cpu_env,int fd)8193 static int open_self_stat(CPUArchState *cpu_env, int fd)
8194 {
8195     CPUState *cpu = env_cpu(cpu_env);
8196     TaskState *ts = get_task_state(cpu);
8197     g_autoptr(GString) buf = g_string_new(NULL);
8198     int i;
8199 
8200     for (i = 0; i < 44; i++) {
8201         if (i == 0) {
8202             /* pid */
8203             g_string_printf(buf, FMT_pid " ", getpid());
8204         } else if (i == 1) {
8205             /* app name */
8206             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8207             bin = bin ? bin + 1 : ts->bprm->argv[0];
8208             g_string_printf(buf, "(%.15s) ", bin);
8209         } else if (i == 2) {
8210             /* task state */
8211             g_string_assign(buf, "R "); /* we are running right now */
8212         } else if (i == 3) {
8213             /* ppid */
8214             g_string_printf(buf, FMT_pid " ", getppid());
8215         } else if (i == 19) {
8216             /* num_threads */
8217             int cpus = 0;
8218             WITH_RCU_READ_LOCK_GUARD() {
8219                 CPUState *cpu_iter;
8220                 CPU_FOREACH(cpu_iter) {
8221                     cpus++;
8222                 }
8223             }
8224             g_string_printf(buf, "%d ", cpus);
8225         } else if (i == 21) {
8226             /* starttime */
8227             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8228         } else if (i == 27) {
8229             /* stack bottom */
8230             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8231         } else {
8232             /* for the rest, there is MasterCard */
8233             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8234         }
8235 
8236         if (write(fd, buf->str, buf->len) != buf->len) {
8237             return -1;
8238         }
8239     }
8240 
8241     return 0;
8242 }
8243 
open_self_auxv(CPUArchState * cpu_env,int fd)8244 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8245 {
8246     CPUState *cpu = env_cpu(cpu_env);
8247     TaskState *ts = get_task_state(cpu);
8248     abi_ulong auxv = ts->info->saved_auxv;
8249     abi_ulong len = ts->info->auxv_len;
8250     char *ptr;
8251 
8252     /*
8253      * Auxiliary vector is stored in target process stack.
8254      * read in whole auxv vector and copy it to file
8255      */
8256     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8257     if (ptr != NULL) {
8258         while (len > 0) {
8259             ssize_t r;
8260             r = write(fd, ptr, len);
8261             if (r <= 0) {
8262                 break;
8263             }
8264             len -= r;
8265             ptr += r;
8266         }
8267         lseek(fd, 0, SEEK_SET);
8268         unlock_user(ptr, auxv, len);
8269     }
8270 
8271     return 0;
8272 }
8273 
is_proc_myself(const char * filename,const char * entry)8274 static int is_proc_myself(const char *filename, const char *entry)
8275 {
8276     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8277         filename += strlen("/proc/");
8278         if (!strncmp(filename, "self/", strlen("self/"))) {
8279             filename += strlen("self/");
8280         } else if (*filename >= '1' && *filename <= '9') {
8281             char myself[80];
8282             snprintf(myself, sizeof(myself), "%d/", getpid());
8283             if (!strncmp(filename, myself, strlen(myself))) {
8284                 filename += strlen(myself);
8285             } else {
8286                 return 0;
8287             }
8288         } else {
8289             return 0;
8290         }
8291         if (!strcmp(filename, entry)) {
8292             return 1;
8293         }
8294     }
8295     return 0;
8296 }
8297 
excp_dump_file(FILE * logfile,CPUArchState * env,const char * fmt,int code)8298 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8299                       const char *fmt, int code)
8300 {
8301     if (logfile) {
8302         CPUState *cs = env_cpu(env);
8303 
8304         fprintf(logfile, fmt, code);
8305         fprintf(logfile, "Failing executable: %s\n", exec_path);
8306         cpu_dump_state(cs, logfile, 0);
8307         open_self_maps(env, fileno(logfile));
8308     }
8309 }
8310 
target_exception_dump(CPUArchState * env,const char * fmt,int code)8311 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8312 {
8313     /* dump to console */
8314     excp_dump_file(stderr, env, fmt, code);
8315 
8316     /* dump to log file */
8317     if (qemu_log_separate()) {
8318         FILE *logfile = qemu_log_trylock();
8319 
8320         excp_dump_file(logfile, env, fmt, code);
8321         qemu_log_unlock(logfile);
8322     }
8323 }
8324 
8325 #include "target_proc.h"
8326 
8327 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8328     defined(HAVE_ARCH_PROC_CPUINFO) || \
8329     defined(HAVE_ARCH_PROC_HARDWARE)
is_proc(const char * filename,const char * entry)8330 static int is_proc(const char *filename, const char *entry)
8331 {
8332     return strcmp(filename, entry) == 0;
8333 }
8334 #endif
8335 
8336 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
open_net_route(CPUArchState * cpu_env,int fd)8337 static int open_net_route(CPUArchState *cpu_env, int fd)
8338 {
8339     FILE *fp;
8340     char *line = NULL;
8341     size_t len = 0;
8342     ssize_t read;
8343 
8344     fp = fopen("/proc/net/route", "r");
8345     if (fp == NULL) {
8346         return -1;
8347     }
8348 
8349     /* read header */
8350 
8351     read = getline(&line, &len, fp);
8352     dprintf(fd, "%s", line);
8353 
8354     /* read routes */
8355 
8356     while ((read = getline(&line, &len, fp)) != -1) {
8357         char iface[16];
8358         uint32_t dest, gw, mask;
8359         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8360         int fields;
8361 
8362         fields = sscanf(line,
8363                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8364                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8365                         &mask, &mtu, &window, &irtt);
8366         if (fields != 11) {
8367             continue;
8368         }
8369         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8370                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8371                 metric, tswap32(mask), mtu, window, irtt);
8372     }
8373 
8374     free(line);
8375     fclose(fp);
8376 
8377     return 0;
8378 }
8379 #endif
8380 
maybe_do_fake_open(CPUArchState * cpu_env,int dirfd,const char * fname,int flags,mode_t mode,int openat2_resolve,bool safe)8381 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8382                               const char *fname, int flags, mode_t mode,
8383                               int openat2_resolve, bool safe)
8384 {
8385     g_autofree char *proc_name = NULL;
8386     const char *pathname;
8387     struct fake_open {
8388         const char *filename;
8389         int (*fill)(CPUArchState *cpu_env, int fd);
8390         int (*cmp)(const char *s1, const char *s2);
8391     };
8392     const struct fake_open *fake_open;
8393     static const struct fake_open fakes[] = {
8394         { "maps", open_self_maps, is_proc_myself },
8395         { "smaps", open_self_smaps, is_proc_myself },
8396         { "stat", open_self_stat, is_proc_myself },
8397         { "auxv", open_self_auxv, is_proc_myself },
8398         { "cmdline", open_self_cmdline, is_proc_myself },
8399 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8400         { "/proc/net/route", open_net_route, is_proc },
8401 #endif
8402 #if defined(HAVE_ARCH_PROC_CPUINFO)
8403         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8404 #endif
8405 #if defined(HAVE_ARCH_PROC_HARDWARE)
8406         { "/proc/hardware", open_hardware, is_proc },
8407 #endif
8408         { NULL, NULL, NULL }
8409     };
8410 
8411     /* if this is a file from /proc/ filesystem, expand full name */
8412     proc_name = realpath(fname, NULL);
8413     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8414         pathname = proc_name;
8415     } else {
8416         pathname = fname;
8417     }
8418 
8419     if (is_proc_myself(pathname, "exe")) {
8420         /* Honor openat2 resolve flags */
8421         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8422             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8423             errno = ELOOP;
8424             return -1;
8425         }
8426         if (safe) {
8427             return safe_openat(dirfd, exec_path, flags, mode);
8428         } else {
8429             return openat(dirfd, exec_path, flags, mode);
8430         }
8431     }
8432 
8433     for (fake_open = fakes; fake_open->filename; fake_open++) {
8434         if (fake_open->cmp(pathname, fake_open->filename)) {
8435             break;
8436         }
8437     }
8438 
8439     if (fake_open->filename) {
8440         const char *tmpdir;
8441         char filename[PATH_MAX];
8442         int fd, r;
8443 
8444         fd = memfd_create("qemu-open", 0);
8445         if (fd < 0) {
8446             if (errno != ENOSYS) {
8447                 return fd;
8448             }
8449             /* create temporary file to map stat to */
8450             tmpdir = getenv("TMPDIR");
8451             if (!tmpdir)
8452                 tmpdir = "/tmp";
8453             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8454             fd = mkstemp(filename);
8455             if (fd < 0) {
8456                 return fd;
8457             }
8458             unlink(filename);
8459         }
8460 
8461         if ((r = fake_open->fill(cpu_env, fd))) {
8462             int e = errno;
8463             close(fd);
8464             errno = e;
8465             return r;
8466         }
8467         lseek(fd, 0, SEEK_SET);
8468 
8469         return fd;
8470     }
8471 
8472     return -2;
8473 }
8474 
do_guest_openat(CPUArchState * cpu_env,int dirfd,const char * pathname,int flags,mode_t mode,bool safe)8475 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8476                     int flags, mode_t mode, bool safe)
8477 {
8478     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8479     if (fd > -2) {
8480         return fd;
8481     }
8482 
8483     if (safe) {
8484         return safe_openat(dirfd, path(pathname), flags, mode);
8485     } else {
8486         return openat(dirfd, path(pathname), flags, mode);
8487     }
8488 }
8489 
8490 
do_openat2(CPUArchState * cpu_env,abi_long dirfd,abi_ptr guest_pathname,abi_ptr guest_open_how,abi_ulong guest_size)8491 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8492                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8493                       abi_ulong guest_size)
8494 {
8495     struct open_how_ver0 how = {0};
8496     char *pathname;
8497     int ret;
8498 
8499     if (guest_size < sizeof(struct target_open_how_ver0)) {
8500         return -TARGET_EINVAL;
8501     }
8502     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8503     if (ret) {
8504         if (ret == -TARGET_E2BIG) {
8505             qemu_log_mask(LOG_UNIMP,
8506                           "Unimplemented openat2 open_how size: "
8507                           TARGET_ABI_FMT_lu "\n", guest_size);
8508         }
8509         return ret;
8510     }
8511     pathname = lock_user_string(guest_pathname);
8512     if (!pathname) {
8513         return -TARGET_EFAULT;
8514     }
8515 
8516     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8517     how.mode = tswap64(how.mode);
8518     how.resolve = tswap64(how.resolve);
8519     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8520                                 how.resolve, true);
8521     if (fd > -2) {
8522         ret = get_errno(fd);
8523     } else {
8524         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8525                                      sizeof(struct open_how_ver0)));
8526     }
8527 
8528     fd_trans_unregister(ret);
8529     unlock_user(pathname, guest_pathname, 0);
8530     return ret;
8531 }
8532 
do_guest_readlink(const char * pathname,char * buf,size_t bufsiz)8533 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8534 {
8535     ssize_t ret;
8536 
8537     if (!pathname || !buf) {
8538         errno = EFAULT;
8539         return -1;
8540     }
8541 
8542     if (!bufsiz) {
8543         /* Short circuit this for the magic exe check. */
8544         errno = EINVAL;
8545         return -1;
8546     }
8547 
8548     if (is_proc_myself((const char *)pathname, "exe")) {
8549         /*
8550          * Don't worry about sign mismatch as earlier mapping
8551          * logic would have thrown a bad address error.
8552          */
8553         ret = MIN(strlen(exec_path), bufsiz);
8554         /* We cannot NUL terminate the string. */
8555         memcpy(buf, exec_path, ret);
8556     } else {
8557         ret = readlink(path(pathname), buf, bufsiz);
8558     }
8559 
8560     return ret;
8561 }
8562 
do_execv(CPUArchState * cpu_env,int dirfd,abi_long pathname,abi_long guest_argp,abi_long guest_envp,int flags,bool is_execveat)8563 static int do_execv(CPUArchState *cpu_env, int dirfd,
8564                     abi_long pathname, abi_long guest_argp,
8565                     abi_long guest_envp, int flags, bool is_execveat)
8566 {
8567     int ret;
8568     char **argp, **envp;
8569     int argc, envc;
8570     abi_ulong gp;
8571     abi_ulong addr;
8572     char **q;
8573     void *p;
8574 
8575     argc = 0;
8576 
8577     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8578         if (get_user_ual(addr, gp)) {
8579             return -TARGET_EFAULT;
8580         }
8581         if (!addr) {
8582             break;
8583         }
8584         argc++;
8585     }
8586     envc = 0;
8587     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8588         if (get_user_ual(addr, gp)) {
8589             return -TARGET_EFAULT;
8590         }
8591         if (!addr) {
8592             break;
8593         }
8594         envc++;
8595     }
8596 
8597     argp = g_new0(char *, argc + 1);
8598     envp = g_new0(char *, envc + 1);
8599 
8600     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8601         if (get_user_ual(addr, gp)) {
8602             goto execve_efault;
8603         }
8604         if (!addr) {
8605             break;
8606         }
8607         *q = lock_user_string(addr);
8608         if (!*q) {
8609             goto execve_efault;
8610         }
8611     }
8612     *q = NULL;
8613 
8614     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8615         if (get_user_ual(addr, gp)) {
8616             goto execve_efault;
8617         }
8618         if (!addr) {
8619             break;
8620         }
8621         *q = lock_user_string(addr);
8622         if (!*q) {
8623             goto execve_efault;
8624         }
8625     }
8626     *q = NULL;
8627 
8628     /*
8629      * Although execve() is not an interruptible syscall it is
8630      * a special case where we must use the safe_syscall wrapper:
8631      * if we allow a signal to happen before we make the host
8632      * syscall then we will 'lose' it, because at the point of
8633      * execve the process leaves QEMU's control. So we use the
8634      * safe syscall wrapper to ensure that we either take the
8635      * signal as a guest signal, or else it does not happen
8636      * before the execve completes and makes it the other
8637      * program's problem.
8638      */
8639     p = lock_user_string(pathname);
8640     if (!p) {
8641         goto execve_efault;
8642     }
8643 
8644     const char *exe = p;
8645     if (is_proc_myself(p, "exe")) {
8646         exe = exec_path;
8647     }
8648     ret = is_execveat
8649         ? safe_execveat(dirfd, exe, argp, envp, flags)
8650         : safe_execve(exe, argp, envp);
8651     ret = get_errno(ret);
8652 
8653     unlock_user(p, pathname, 0);
8654 
8655     goto execve_end;
8656 
8657 execve_efault:
8658     ret = -TARGET_EFAULT;
8659 
8660 execve_end:
8661     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8662         if (get_user_ual(addr, gp) || !addr) {
8663             break;
8664         }
8665         unlock_user(*q, addr, 0);
8666     }
8667     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8668         if (get_user_ual(addr, gp) || !addr) {
8669             break;
8670         }
8671         unlock_user(*q, addr, 0);
8672     }
8673 
8674     g_free(argp);
8675     g_free(envp);
8676     return ret;
8677 }
8678 
8679 #define TIMER_MAGIC 0x0caf0000
8680 #define TIMER_MAGIC_MASK 0xffff0000
8681 
8682 /* Convert QEMU provided timer ID back to internal 16bit index format */
get_timer_id(abi_long arg)8683 static target_timer_t get_timer_id(abi_long arg)
8684 {
8685     target_timer_t timerid = arg;
8686 
8687     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8688         return -TARGET_EINVAL;
8689     }
8690 
8691     timerid &= 0xffff;
8692 
8693     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8694         return -TARGET_EINVAL;
8695     }
8696 
8697     return timerid;
8698 }
8699 
target_to_host_cpu_mask(unsigned long * host_mask,size_t host_size,abi_ulong target_addr,size_t target_size)8700 static int target_to_host_cpu_mask(unsigned long *host_mask,
8701                                    size_t host_size,
8702                                    abi_ulong target_addr,
8703                                    size_t target_size)
8704 {
8705     unsigned target_bits = sizeof(abi_ulong) * 8;
8706     unsigned host_bits = sizeof(*host_mask) * 8;
8707     abi_ulong *target_mask;
8708     unsigned i, j;
8709 
8710     assert(host_size >= target_size);
8711 
8712     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8713     if (!target_mask) {
8714         return -TARGET_EFAULT;
8715     }
8716     memset(host_mask, 0, host_size);
8717 
8718     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8719         unsigned bit = i * target_bits;
8720         abi_ulong val;
8721 
8722         __get_user(val, &target_mask[i]);
8723         for (j = 0; j < target_bits; j++, bit++) {
8724             if (val & (1UL << j)) {
8725                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8726             }
8727         }
8728     }
8729 
8730     unlock_user(target_mask, target_addr, 0);
8731     return 0;
8732 }
8733 
host_to_target_cpu_mask(const unsigned long * host_mask,size_t host_size,abi_ulong target_addr,size_t target_size)8734 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8735                                    size_t host_size,
8736                                    abi_ulong target_addr,
8737                                    size_t target_size)
8738 {
8739     unsigned target_bits = sizeof(abi_ulong) * 8;
8740     unsigned host_bits = sizeof(*host_mask) * 8;
8741     abi_ulong *target_mask;
8742     unsigned i, j;
8743 
8744     assert(host_size >= target_size);
8745 
8746     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8747     if (!target_mask) {
8748         return -TARGET_EFAULT;
8749     }
8750 
8751     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8752         unsigned bit = i * target_bits;
8753         abi_ulong val = 0;
8754 
8755         for (j = 0; j < target_bits; j++, bit++) {
8756             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8757                 val |= 1UL << j;
8758             }
8759         }
8760         __put_user(val, &target_mask[i]);
8761     }
8762 
8763     unlock_user(target_mask, target_addr, target_size);
8764     return 0;
8765 }
8766 
8767 #ifdef TARGET_NR_getdents
do_getdents(abi_long dirfd,abi_long arg2,abi_long count)8768 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8769 {
8770     g_autofree void *hdirp = NULL;
8771     void *tdirp;
8772     int hlen, hoff, toff;
8773     int hreclen, treclen;
8774     off_t prev_diroff = 0;
8775 
8776     hdirp = g_try_malloc(count);
8777     if (!hdirp) {
8778         return -TARGET_ENOMEM;
8779     }
8780 
8781 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8782     hlen = sys_getdents(dirfd, hdirp, count);
8783 #else
8784     hlen = sys_getdents64(dirfd, hdirp, count);
8785 #endif
8786 
8787     hlen = get_errno(hlen);
8788     if (is_error(hlen)) {
8789         return hlen;
8790     }
8791 
8792     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8793     if (!tdirp) {
8794         return -TARGET_EFAULT;
8795     }
8796 
8797     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8798 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8799         struct linux_dirent *hde = hdirp + hoff;
8800 #else
8801         struct linux_dirent64 *hde = hdirp + hoff;
8802 #endif
8803         struct target_dirent *tde = tdirp + toff;
8804         int namelen;
8805         uint8_t type;
8806 
8807         namelen = strlen(hde->d_name);
8808         hreclen = hde->d_reclen;
8809         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8810         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8811 
8812         if (toff + treclen > count) {
8813             /*
8814              * If the host struct is smaller than the target struct, or
8815              * requires less alignment and thus packs into less space,
8816              * then the host can return more entries than we can pass
8817              * on to the guest.
8818              */
8819             if (toff == 0) {
8820                 toff = -TARGET_EINVAL; /* result buffer is too small */
8821                 break;
8822             }
8823             /*
8824              * Return what we have, resetting the file pointer to the
8825              * location of the first record not returned.
8826              */
8827             lseek(dirfd, prev_diroff, SEEK_SET);
8828             break;
8829         }
8830 
8831         prev_diroff = hde->d_off;
8832         tde->d_ino = tswapal(hde->d_ino);
8833         tde->d_off = tswapal(hde->d_off);
8834         tde->d_reclen = tswap16(treclen);
8835         memcpy(tde->d_name, hde->d_name, namelen + 1);
8836 
8837         /*
8838          * The getdents type is in what was formerly a padding byte at the
8839          * end of the structure.
8840          */
8841 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8842         type = *((uint8_t *)hde + hreclen - 1);
8843 #else
8844         type = hde->d_type;
8845 #endif
8846         *((uint8_t *)tde + treclen - 1) = type;
8847     }
8848 
8849     unlock_user(tdirp, arg2, toff);
8850     return toff;
8851 }
8852 #endif /* TARGET_NR_getdents */
8853 
8854 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
do_getdents64(abi_long dirfd,abi_long arg2,abi_long count)8855 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8856 {
8857     g_autofree void *hdirp = NULL;
8858     void *tdirp;
8859     int hlen, hoff, toff;
8860     int hreclen, treclen;
8861     off_t prev_diroff = 0;
8862 
8863     hdirp = g_try_malloc(count);
8864     if (!hdirp) {
8865         return -TARGET_ENOMEM;
8866     }
8867 
8868     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8869     if (is_error(hlen)) {
8870         return hlen;
8871     }
8872 
8873     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8874     if (!tdirp) {
8875         return -TARGET_EFAULT;
8876     }
8877 
8878     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8879         struct linux_dirent64 *hde = hdirp + hoff;
8880         struct target_dirent64 *tde = tdirp + toff;
8881         int namelen;
8882 
8883         namelen = strlen(hde->d_name) + 1;
8884         hreclen = hde->d_reclen;
8885         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8886         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8887 
8888         if (toff + treclen > count) {
8889             /*
8890              * If the host struct is smaller than the target struct, or
8891              * requires less alignment and thus packs into less space,
8892              * then the host can return more entries than we can pass
8893              * on to the guest.
8894              */
8895             if (toff == 0) {
8896                 toff = -TARGET_EINVAL; /* result buffer is too small */
8897                 break;
8898             }
8899             /*
8900              * Return what we have, resetting the file pointer to the
8901              * location of the first record not returned.
8902              */
8903             lseek(dirfd, prev_diroff, SEEK_SET);
8904             break;
8905         }
8906 
8907         prev_diroff = hde->d_off;
8908         tde->d_ino = tswap64(hde->d_ino);
8909         tde->d_off = tswap64(hde->d_off);
8910         tde->d_reclen = tswap16(treclen);
8911         tde->d_type = hde->d_type;
8912         memcpy(tde->d_name, hde->d_name, namelen);
8913     }
8914 
8915     unlock_user(tdirp, arg2, toff);
8916     return toff;
8917 }
8918 #endif /* TARGET_NR_getdents64 */
8919 
8920 #if defined(TARGET_NR_riscv_hwprobe)
8921 
8922 #define RISCV_HWPROBE_KEY_MVENDORID     0
8923 #define RISCV_HWPROBE_KEY_MARCHID       1
8924 #define RISCV_HWPROBE_KEY_MIMPID        2
8925 
8926 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8927 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8928 
8929 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8930 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8931 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8932 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8933 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8934 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8935 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8936 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8937 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8938 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8939 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8940 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8941 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8942 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8943 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8944 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8945 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8946 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8947 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8948 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8949 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8950 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8951 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8952 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8953 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8954 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8955 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8956 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8957 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8958 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8959 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8960 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8961 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8962 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8963 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8964 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8965 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8966 
8967 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8968 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8969 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8970 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8971 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8972 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8973 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8974 
8975 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8976 
8977 struct riscv_hwprobe {
8978     abi_llong  key;
8979     abi_ullong value;
8980 };
8981 
risc_hwprobe_fill_pairs(CPURISCVState * env,struct riscv_hwprobe * pair,size_t pair_count)8982 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8983                                     struct riscv_hwprobe *pair,
8984                                     size_t pair_count)
8985 {
8986     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8987 
8988     for (; pair_count > 0; pair_count--, pair++) {
8989         abi_llong key;
8990         abi_ullong value;
8991         __put_user(0, &pair->value);
8992         __get_user(key, &pair->key);
8993         switch (key) {
8994         case RISCV_HWPROBE_KEY_MVENDORID:
8995             __put_user(cfg->mvendorid, &pair->value);
8996             break;
8997         case RISCV_HWPROBE_KEY_MARCHID:
8998             __put_user(cfg->marchid, &pair->value);
8999             break;
9000         case RISCV_HWPROBE_KEY_MIMPID:
9001             __put_user(cfg->mimpid, &pair->value);
9002             break;
9003         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9004             value = riscv_has_ext(env, RVI) &&
9005                     riscv_has_ext(env, RVM) &&
9006                     riscv_has_ext(env, RVA) ?
9007                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9008             __put_user(value, &pair->value);
9009             break;
9010         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9011             value = riscv_has_ext(env, RVF) &&
9012                     riscv_has_ext(env, RVD) ?
9013                     RISCV_HWPROBE_IMA_FD : 0;
9014             value |= riscv_has_ext(env, RVC) ?
9015                      RISCV_HWPROBE_IMA_C : 0;
9016             value |= riscv_has_ext(env, RVV) ?
9017                      RISCV_HWPROBE_IMA_V : 0;
9018             value |= cfg->ext_zba ?
9019                      RISCV_HWPROBE_EXT_ZBA : 0;
9020             value |= cfg->ext_zbb ?
9021                      RISCV_HWPROBE_EXT_ZBB : 0;
9022             value |= cfg->ext_zbs ?
9023                      RISCV_HWPROBE_EXT_ZBS : 0;
9024             value |= cfg->ext_zicboz ?
9025                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9026             value |= cfg->ext_zbc ?
9027                      RISCV_HWPROBE_EXT_ZBC : 0;
9028             value |= cfg->ext_zbkb ?
9029                      RISCV_HWPROBE_EXT_ZBKB : 0;
9030             value |= cfg->ext_zbkc ?
9031                      RISCV_HWPROBE_EXT_ZBKC : 0;
9032             value |= cfg->ext_zbkx ?
9033                      RISCV_HWPROBE_EXT_ZBKX : 0;
9034             value |= cfg->ext_zknd ?
9035                      RISCV_HWPROBE_EXT_ZKND : 0;
9036             value |= cfg->ext_zkne ?
9037                      RISCV_HWPROBE_EXT_ZKNE : 0;
9038             value |= cfg->ext_zknh ?
9039                      RISCV_HWPROBE_EXT_ZKNH : 0;
9040             value |= cfg->ext_zksed ?
9041                      RISCV_HWPROBE_EXT_ZKSED : 0;
9042             value |= cfg->ext_zksh ?
9043                      RISCV_HWPROBE_EXT_ZKSH : 0;
9044             value |= cfg->ext_zkt ?
9045                      RISCV_HWPROBE_EXT_ZKT : 0;
9046             value |= cfg->ext_zvbb ?
9047                      RISCV_HWPROBE_EXT_ZVBB : 0;
9048             value |= cfg->ext_zvbc ?
9049                      RISCV_HWPROBE_EXT_ZVBC : 0;
9050             value |= cfg->ext_zvkb ?
9051                      RISCV_HWPROBE_EXT_ZVKB : 0;
9052             value |= cfg->ext_zvkg ?
9053                      RISCV_HWPROBE_EXT_ZVKG : 0;
9054             value |= cfg->ext_zvkned ?
9055                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9056             value |= cfg->ext_zvknha ?
9057                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9058             value |= cfg->ext_zvknhb ?
9059                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9060             value |= cfg->ext_zvksed ?
9061                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9062             value |= cfg->ext_zvksh ?
9063                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9064             value |= cfg->ext_zvkt ?
9065                      RISCV_HWPROBE_EXT_ZVKT : 0;
9066             value |= cfg->ext_zfh ?
9067                      RISCV_HWPROBE_EXT_ZFH : 0;
9068             value |= cfg->ext_zfhmin ?
9069                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9070             value |= cfg->ext_zihintntl ?
9071                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9072             value |= cfg->ext_zvfh ?
9073                      RISCV_HWPROBE_EXT_ZVFH : 0;
9074             value |= cfg->ext_zvfhmin ?
9075                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9076             value |= cfg->ext_zfa ?
9077                      RISCV_HWPROBE_EXT_ZFA : 0;
9078             value |= cfg->ext_ztso ?
9079                      RISCV_HWPROBE_EXT_ZTSO : 0;
9080             value |= cfg->ext_zacas ?
9081                      RISCV_HWPROBE_EXT_ZACAS : 0;
9082             value |= cfg->ext_zicond ?
9083                      RISCV_HWPROBE_EXT_ZICOND : 0;
9084             __put_user(value, &pair->value);
9085             break;
9086         case RISCV_HWPROBE_KEY_CPUPERF_0:
9087             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9088             break;
9089         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9090             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9091             __put_user(value, &pair->value);
9092             break;
9093         default:
9094             __put_user(-1, &pair->key);
9095             break;
9096         }
9097     }
9098 }
9099 
cpu_set_valid(abi_long arg3,abi_long arg4)9100 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9101 {
9102     int ret, i, tmp;
9103     size_t host_mask_size, target_mask_size;
9104     unsigned long *host_mask;
9105 
9106     /*
9107      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9108      * arg3 contains the cpu count.
9109      */
9110     tmp = (8 * sizeof(abi_ulong));
9111     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9112     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9113                      ~(sizeof(*host_mask) - 1);
9114 
9115     host_mask = alloca(host_mask_size);
9116 
9117     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9118                                   arg4, target_mask_size);
9119     if (ret != 0) {
9120         return ret;
9121     }
9122 
9123     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9124         if (host_mask[i] != 0) {
9125             return 0;
9126         }
9127     }
9128     return -TARGET_EINVAL;
9129 }
9130 
do_riscv_hwprobe(CPUArchState * cpu_env,abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5)9131 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9132                                  abi_long arg2, abi_long arg3,
9133                                  abi_long arg4, abi_long arg5)
9134 {
9135     int ret;
9136     struct riscv_hwprobe *host_pairs;
9137 
9138     /* flags must be 0 */
9139     if (arg5 != 0) {
9140         return -TARGET_EINVAL;
9141     }
9142 
9143     /* check cpu_set */
9144     if (arg3 != 0) {
9145         ret = cpu_set_valid(arg3, arg4);
9146         if (ret != 0) {
9147             return ret;
9148         }
9149     } else if (arg4 != 0) {
9150         return -TARGET_EINVAL;
9151     }
9152 
9153     /* no pairs */
9154     if (arg2 == 0) {
9155         return 0;
9156     }
9157 
9158     host_pairs = lock_user(VERIFY_WRITE, arg1,
9159                            sizeof(*host_pairs) * (size_t)arg2, 0);
9160     if (host_pairs == NULL) {
9161         return -TARGET_EFAULT;
9162     }
9163     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9164     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9165     return 0;
9166 }
9167 #endif /* TARGET_NR_riscv_hwprobe */
9168 
9169 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int,pivot_root,const char *,new_root,const char *,put_old)9170 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9171 #endif
9172 
9173 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9174 #define __NR_sys_open_tree __NR_open_tree
9175 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9176           unsigned int, __flags)
9177 #endif
9178 
9179 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9180 #define __NR_sys_move_mount __NR_move_mount
9181 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9182            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9183 #endif
9184 
9185 /* This is an internal helper for do_syscall so that it is easier
9186  * to have a single return point, so that actions, such as logging
9187  * of syscall results, can be performed.
9188  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9189  */
9190 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9191                             abi_long arg2, abi_long arg3, abi_long arg4,
9192                             abi_long arg5, abi_long arg6, abi_long arg7,
9193                             abi_long arg8)
9194 {
9195     CPUState *cpu = env_cpu(cpu_env);
9196     abi_long ret;
9197 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9198     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9199     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9200     || defined(TARGET_NR_statx)
9201     struct stat st;
9202 #endif
9203 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9204     || defined(TARGET_NR_fstatfs)
9205     struct statfs stfs;
9206 #endif
9207     void *p;
9208 
9209     switch(num) {
9210     case TARGET_NR_exit:
9211         /* In old applications this may be used to implement _exit(2).
9212            However in threaded applications it is used for thread termination,
9213            and _exit_group is used for application termination.
9214            Do thread termination if we have more then one thread.  */
9215 
9216         if (block_signals()) {
9217             return -QEMU_ERESTARTSYS;
9218         }
9219 
9220         pthread_mutex_lock(&clone_lock);
9221 
9222         if (CPU_NEXT(first_cpu)) {
9223             TaskState *ts = get_task_state(cpu);
9224 
9225             if (ts->child_tidptr) {
9226                 put_user_u32(0, ts->child_tidptr);
9227                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9228                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9229             }
9230 
9231             object_unparent(OBJECT(cpu));
9232             object_unref(OBJECT(cpu));
9233             /*
9234              * At this point the CPU should be unrealized and removed
9235              * from cpu lists. We can clean-up the rest of the thread
9236              * data without the lock held.
9237              */
9238 
9239             pthread_mutex_unlock(&clone_lock);
9240 
9241             thread_cpu = NULL;
9242             g_free(ts);
9243             rcu_unregister_thread();
9244             pthread_exit(NULL);
9245         }
9246 
9247         pthread_mutex_unlock(&clone_lock);
9248         preexit_cleanup(cpu_env, arg1);
9249         _exit(arg1);
9250         return 0; /* avoid warning */
9251     case TARGET_NR_read:
9252         if (arg2 == 0 && arg3 == 0) {
9253             return get_errno(safe_read(arg1, 0, 0));
9254         } else {
9255             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9256                 return -TARGET_EFAULT;
9257             ret = get_errno(safe_read(arg1, p, arg3));
9258             if (ret >= 0 &&
9259                 fd_trans_host_to_target_data(arg1)) {
9260                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9261             }
9262             unlock_user(p, arg2, ret);
9263         }
9264         return ret;
9265     case TARGET_NR_write:
9266         if (arg2 == 0 && arg3 == 0) {
9267             return get_errno(safe_write(arg1, 0, 0));
9268         }
9269         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9270             return -TARGET_EFAULT;
9271         if (fd_trans_target_to_host_data(arg1)) {
9272             void *copy = g_malloc(arg3);
9273             memcpy(copy, p, arg3);
9274             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9275             if (ret >= 0) {
9276                 ret = get_errno(safe_write(arg1, copy, ret));
9277             }
9278             g_free(copy);
9279         } else {
9280             ret = get_errno(safe_write(arg1, p, arg3));
9281         }
9282         unlock_user(p, arg2, 0);
9283         return ret;
9284 
9285 #ifdef TARGET_NR_open
9286     case TARGET_NR_open:
9287         if (!(p = lock_user_string(arg1)))
9288             return -TARGET_EFAULT;
9289         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9290                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9291                                   arg3, true));
9292         fd_trans_unregister(ret);
9293         unlock_user(p, arg1, 0);
9294         return ret;
9295 #endif
9296     case TARGET_NR_openat:
9297         if (!(p = lock_user_string(arg2)))
9298             return -TARGET_EFAULT;
9299         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9300                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9301                                   arg4, true));
9302         fd_trans_unregister(ret);
9303         unlock_user(p, arg2, 0);
9304         return ret;
9305     case TARGET_NR_openat2:
9306         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9307         return ret;
9308 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9309     case TARGET_NR_name_to_handle_at:
9310         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9311         return ret;
9312 #endif
9313 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9314     case TARGET_NR_open_by_handle_at:
9315         ret = do_open_by_handle_at(arg1, arg2, arg3);
9316         fd_trans_unregister(ret);
9317         return ret;
9318 #endif
9319 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9320     case TARGET_NR_pidfd_open:
9321         return get_errno(pidfd_open(arg1, arg2));
9322 #endif
9323 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9324     case TARGET_NR_pidfd_send_signal:
9325         {
9326             siginfo_t uinfo, *puinfo;
9327 
9328             if (arg3) {
9329                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9330                 if (!p) {
9331                     return -TARGET_EFAULT;
9332                  }
9333                  target_to_host_siginfo(&uinfo, p);
9334                  unlock_user(p, arg3, 0);
9335                  puinfo = &uinfo;
9336             } else {
9337                  puinfo = NULL;
9338             }
9339             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9340                                               puinfo, arg4));
9341         }
9342         return ret;
9343 #endif
9344 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9345     case TARGET_NR_pidfd_getfd:
9346         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9347 #endif
9348     case TARGET_NR_close:
9349         fd_trans_unregister(arg1);
9350         return get_errno(close(arg1));
9351 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9352     case TARGET_NR_close_range:
9353         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9354         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9355             abi_long fd, maxfd;
9356             maxfd = MIN(arg2, target_fd_max);
9357             for (fd = arg1; fd < maxfd; fd++) {
9358                 fd_trans_unregister(fd);
9359             }
9360         }
9361         return ret;
9362 #endif
9363 
9364     case TARGET_NR_brk:
9365         return do_brk(arg1);
9366 #ifdef TARGET_NR_fork
9367     case TARGET_NR_fork:
9368         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9369 #endif
9370 #ifdef TARGET_NR_waitpid
9371     case TARGET_NR_waitpid:
9372         {
9373             int status;
9374             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9375             if (!is_error(ret) && arg2 && ret
9376                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9377                 return -TARGET_EFAULT;
9378         }
9379         return ret;
9380 #endif
9381 #ifdef TARGET_NR_waitid
9382     case TARGET_NR_waitid:
9383         {
9384             struct rusage ru;
9385             siginfo_t info;
9386 
9387             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9388                                         arg4, (arg5 ? &ru : NULL)));
9389             if (!is_error(ret)) {
9390                 if (arg3) {
9391                     p = lock_user(VERIFY_WRITE, arg3,
9392                                   sizeof(target_siginfo_t), 0);
9393                     if (!p) {
9394                         return -TARGET_EFAULT;
9395                     }
9396                     host_to_target_siginfo(p, &info);
9397                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9398                 }
9399                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9400                     return -TARGET_EFAULT;
9401                 }
9402             }
9403         }
9404         return ret;
9405 #endif
9406 #ifdef TARGET_NR_creat /* not on alpha */
9407     case TARGET_NR_creat:
9408         if (!(p = lock_user_string(arg1)))
9409             return -TARGET_EFAULT;
9410         ret = get_errno(creat(p, arg2));
9411         fd_trans_unregister(ret);
9412         unlock_user(p, arg1, 0);
9413         return ret;
9414 #endif
9415 #ifdef TARGET_NR_link
9416     case TARGET_NR_link:
9417         {
9418             void * p2;
9419             p = lock_user_string(arg1);
9420             p2 = lock_user_string(arg2);
9421             if (!p || !p2)
9422                 ret = -TARGET_EFAULT;
9423             else
9424                 ret = get_errno(link(p, p2));
9425             unlock_user(p2, arg2, 0);
9426             unlock_user(p, arg1, 0);
9427         }
9428         return ret;
9429 #endif
9430 #if defined(TARGET_NR_linkat)
9431     case TARGET_NR_linkat:
9432         {
9433             void * p2 = NULL;
9434             if (!arg2 || !arg4)
9435                 return -TARGET_EFAULT;
9436             p  = lock_user_string(arg2);
9437             p2 = lock_user_string(arg4);
9438             if (!p || !p2)
9439                 ret = -TARGET_EFAULT;
9440             else
9441                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9442             unlock_user(p, arg2, 0);
9443             unlock_user(p2, arg4, 0);
9444         }
9445         return ret;
9446 #endif
9447 #ifdef TARGET_NR_unlink
9448     case TARGET_NR_unlink:
9449         if (!(p = lock_user_string(arg1)))
9450             return -TARGET_EFAULT;
9451         ret = get_errno(unlink(p));
9452         unlock_user(p, arg1, 0);
9453         return ret;
9454 #endif
9455 #if defined(TARGET_NR_unlinkat)
9456     case TARGET_NR_unlinkat:
9457         if (!(p = lock_user_string(arg2)))
9458             return -TARGET_EFAULT;
9459         ret = get_errno(unlinkat(arg1, p, arg3));
9460         unlock_user(p, arg2, 0);
9461         return ret;
9462 #endif
9463     case TARGET_NR_execveat:
9464         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9465     case TARGET_NR_execve:
9466         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9467     case TARGET_NR_chdir:
9468         if (!(p = lock_user_string(arg1)))
9469             return -TARGET_EFAULT;
9470         ret = get_errno(chdir(p));
9471         unlock_user(p, arg1, 0);
9472         return ret;
9473 #ifdef TARGET_NR_time
9474     case TARGET_NR_time:
9475         {
9476             time_t host_time;
9477             ret = get_errno(time(&host_time));
9478             if (!is_error(ret)
9479                 && arg1
9480                 && put_user_sal(host_time, arg1))
9481                 return -TARGET_EFAULT;
9482         }
9483         return ret;
9484 #endif
9485 #ifdef TARGET_NR_mknod
9486     case TARGET_NR_mknod:
9487         if (!(p = lock_user_string(arg1)))
9488             return -TARGET_EFAULT;
9489         ret = get_errno(mknod(p, arg2, arg3));
9490         unlock_user(p, arg1, 0);
9491         return ret;
9492 #endif
9493 #if defined(TARGET_NR_mknodat)
9494     case TARGET_NR_mknodat:
9495         if (!(p = lock_user_string(arg2)))
9496             return -TARGET_EFAULT;
9497         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9498         unlock_user(p, arg2, 0);
9499         return ret;
9500 #endif
9501 #ifdef TARGET_NR_chmod
9502     case TARGET_NR_chmod:
9503         if (!(p = lock_user_string(arg1)))
9504             return -TARGET_EFAULT;
9505         ret = get_errno(chmod(p, arg2));
9506         unlock_user(p, arg1, 0);
9507         return ret;
9508 #endif
9509 #ifdef TARGET_NR_lseek
9510     case TARGET_NR_lseek:
9511         return get_errno(lseek(arg1, arg2, arg3));
9512 #endif
9513 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9514     /* Alpha specific */
9515     case TARGET_NR_getxpid:
9516         cpu_env->ir[IR_A4] = getppid();
9517         return get_errno(getpid());
9518 #endif
9519 #ifdef TARGET_NR_getpid
9520     case TARGET_NR_getpid:
9521         return get_errno(getpid());
9522 #endif
9523     case TARGET_NR_mount:
9524         {
9525             /* need to look at the data field */
9526             void *p2, *p3;
9527 
9528             if (arg1) {
9529                 p = lock_user_string(arg1);
9530                 if (!p) {
9531                     return -TARGET_EFAULT;
9532                 }
9533             } else {
9534                 p = NULL;
9535             }
9536 
9537             p2 = lock_user_string(arg2);
9538             if (!p2) {
9539                 if (arg1) {
9540                     unlock_user(p, arg1, 0);
9541                 }
9542                 return -TARGET_EFAULT;
9543             }
9544 
9545             if (arg3) {
9546                 p3 = lock_user_string(arg3);
9547                 if (!p3) {
9548                     if (arg1) {
9549                         unlock_user(p, arg1, 0);
9550                     }
9551                     unlock_user(p2, arg2, 0);
9552                     return -TARGET_EFAULT;
9553                 }
9554             } else {
9555                 p3 = NULL;
9556             }
9557 
9558             /* FIXME - arg5 should be locked, but it isn't clear how to
9559              * do that since it's not guaranteed to be a NULL-terminated
9560              * string.
9561              */
9562             if (!arg5) {
9563                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9564             } else {
9565                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9566             }
9567             ret = get_errno(ret);
9568 
9569             if (arg1) {
9570                 unlock_user(p, arg1, 0);
9571             }
9572             unlock_user(p2, arg2, 0);
9573             if (arg3) {
9574                 unlock_user(p3, arg3, 0);
9575             }
9576         }
9577         return ret;
9578 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9579 #if defined(TARGET_NR_umount)
9580     case TARGET_NR_umount:
9581 #endif
9582 #if defined(TARGET_NR_oldumount)
9583     case TARGET_NR_oldumount:
9584 #endif
9585         if (!(p = lock_user_string(arg1)))
9586             return -TARGET_EFAULT;
9587         ret = get_errno(umount(p));
9588         unlock_user(p, arg1, 0);
9589         return ret;
9590 #endif
9591 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9592     case TARGET_NR_move_mount:
9593         {
9594             void *p2, *p4;
9595 
9596             if (!arg2 || !arg4) {
9597                 return -TARGET_EFAULT;
9598             }
9599 
9600             p2 = lock_user_string(arg2);
9601             if (!p2) {
9602                 return -TARGET_EFAULT;
9603             }
9604 
9605             p4 = lock_user_string(arg4);
9606             if (!p4) {
9607                 unlock_user(p2, arg2, 0);
9608                 return -TARGET_EFAULT;
9609             }
9610             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9611 
9612             unlock_user(p2, arg2, 0);
9613             unlock_user(p4, arg4, 0);
9614 
9615             return ret;
9616         }
9617 #endif
9618 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9619     case TARGET_NR_open_tree:
9620         {
9621             void *p2;
9622             int host_flags;
9623 
9624             if (!arg2) {
9625                 return -TARGET_EFAULT;
9626             }
9627 
9628             p2 = lock_user_string(arg2);
9629             if (!p2) {
9630                 return -TARGET_EFAULT;
9631             }
9632 
9633             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9634             if (arg3 & TARGET_O_CLOEXEC) {
9635                 host_flags |= O_CLOEXEC;
9636             }
9637 
9638             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9639 
9640             unlock_user(p2, arg2, 0);
9641 
9642             return ret;
9643         }
9644 #endif
9645 #ifdef TARGET_NR_stime /* not on alpha */
9646     case TARGET_NR_stime:
9647         {
9648             struct timespec ts;
9649             ts.tv_nsec = 0;
9650             if (get_user_sal(ts.tv_sec, arg1)) {
9651                 return -TARGET_EFAULT;
9652             }
9653             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9654         }
9655 #endif
9656 #ifdef TARGET_NR_alarm /* not on alpha */
9657     case TARGET_NR_alarm:
9658         return alarm(arg1);
9659 #endif
9660 #ifdef TARGET_NR_pause /* not on alpha */
9661     case TARGET_NR_pause:
9662         if (!block_signals()) {
9663             sigsuspend(&get_task_state(cpu)->signal_mask);
9664         }
9665         return -TARGET_EINTR;
9666 #endif
9667 #ifdef TARGET_NR_utime
9668     case TARGET_NR_utime:
9669         {
9670             struct utimbuf tbuf, *host_tbuf;
9671             struct target_utimbuf *target_tbuf;
9672             if (arg2) {
9673                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9674                     return -TARGET_EFAULT;
9675                 tbuf.actime = tswapal(target_tbuf->actime);
9676                 tbuf.modtime = tswapal(target_tbuf->modtime);
9677                 unlock_user_struct(target_tbuf, arg2, 0);
9678                 host_tbuf = &tbuf;
9679             } else {
9680                 host_tbuf = NULL;
9681             }
9682             if (!(p = lock_user_string(arg1)))
9683                 return -TARGET_EFAULT;
9684             ret = get_errno(utime(p, host_tbuf));
9685             unlock_user(p, arg1, 0);
9686         }
9687         return ret;
9688 #endif
9689 #ifdef TARGET_NR_utimes
9690     case TARGET_NR_utimes:
9691         {
9692             struct timeval *tvp, tv[2];
9693             if (arg2) {
9694                 if (copy_from_user_timeval(&tv[0], arg2)
9695                     || copy_from_user_timeval(&tv[1],
9696                                               arg2 + sizeof(struct target_timeval)))
9697                     return -TARGET_EFAULT;
9698                 tvp = tv;
9699             } else {
9700                 tvp = NULL;
9701             }
9702             if (!(p = lock_user_string(arg1)))
9703                 return -TARGET_EFAULT;
9704             ret = get_errno(utimes(p, tvp));
9705             unlock_user(p, arg1, 0);
9706         }
9707         return ret;
9708 #endif
9709 #if defined(TARGET_NR_futimesat)
9710     case TARGET_NR_futimesat:
9711         {
9712             struct timeval *tvp, tv[2];
9713             if (arg3) {
9714                 if (copy_from_user_timeval(&tv[0], arg3)
9715                     || copy_from_user_timeval(&tv[1],
9716                                               arg3 + sizeof(struct target_timeval)))
9717                     return -TARGET_EFAULT;
9718                 tvp = tv;
9719             } else {
9720                 tvp = NULL;
9721             }
9722             if (!(p = lock_user_string(arg2))) {
9723                 return -TARGET_EFAULT;
9724             }
9725             ret = get_errno(futimesat(arg1, path(p), tvp));
9726             unlock_user(p, arg2, 0);
9727         }
9728         return ret;
9729 #endif
9730 #ifdef TARGET_NR_access
9731     case TARGET_NR_access:
9732         if (!(p = lock_user_string(arg1))) {
9733             return -TARGET_EFAULT;
9734         }
9735         ret = get_errno(access(path(p), arg2));
9736         unlock_user(p, arg1, 0);
9737         return ret;
9738 #endif
9739 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9740     case TARGET_NR_faccessat:
9741         if (!(p = lock_user_string(arg2))) {
9742             return -TARGET_EFAULT;
9743         }
9744         ret = get_errno(faccessat(arg1, p, arg3, 0));
9745         unlock_user(p, arg2, 0);
9746         return ret;
9747 #endif
9748 #if defined(TARGET_NR_faccessat2)
9749     case TARGET_NR_faccessat2:
9750         if (!(p = lock_user_string(arg2))) {
9751             return -TARGET_EFAULT;
9752         }
9753         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9754         unlock_user(p, arg2, 0);
9755         return ret;
9756 #endif
9757 #ifdef TARGET_NR_nice /* not on alpha */
9758     case TARGET_NR_nice:
9759         return get_errno(nice(arg1));
9760 #endif
9761     case TARGET_NR_sync:
9762         sync();
9763         return 0;
9764 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9765     case TARGET_NR_syncfs:
9766         return get_errno(syncfs(arg1));
9767 #endif
9768     case TARGET_NR_kill:
9769         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9770 #ifdef TARGET_NR_rename
9771     case TARGET_NR_rename:
9772         {
9773             void *p2;
9774             p = lock_user_string(arg1);
9775             p2 = lock_user_string(arg2);
9776             if (!p || !p2)
9777                 ret = -TARGET_EFAULT;
9778             else
9779                 ret = get_errno(rename(p, p2));
9780             unlock_user(p2, arg2, 0);
9781             unlock_user(p, arg1, 0);
9782         }
9783         return ret;
9784 #endif
9785 #if defined(TARGET_NR_renameat)
9786     case TARGET_NR_renameat:
9787         {
9788             void *p2;
9789             p  = lock_user_string(arg2);
9790             p2 = lock_user_string(arg4);
9791             if (!p || !p2)
9792                 ret = -TARGET_EFAULT;
9793             else
9794                 ret = get_errno(renameat(arg1, p, arg3, p2));
9795             unlock_user(p2, arg4, 0);
9796             unlock_user(p, arg2, 0);
9797         }
9798         return ret;
9799 #endif
9800 #if defined(TARGET_NR_renameat2)
9801     case TARGET_NR_renameat2:
9802         {
9803             void *p2;
9804             p  = lock_user_string(arg2);
9805             p2 = lock_user_string(arg4);
9806             if (!p || !p2) {
9807                 ret = -TARGET_EFAULT;
9808             } else {
9809                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9810             }
9811             unlock_user(p2, arg4, 0);
9812             unlock_user(p, arg2, 0);
9813         }
9814         return ret;
9815 #endif
9816 #ifdef TARGET_NR_mkdir
9817     case TARGET_NR_mkdir:
9818         if (!(p = lock_user_string(arg1)))
9819             return -TARGET_EFAULT;
9820         ret = get_errno(mkdir(p, arg2));
9821         unlock_user(p, arg1, 0);
9822         return ret;
9823 #endif
9824 #if defined(TARGET_NR_mkdirat)
9825     case TARGET_NR_mkdirat:
9826         if (!(p = lock_user_string(arg2)))
9827             return -TARGET_EFAULT;
9828         ret = get_errno(mkdirat(arg1, p, arg3));
9829         unlock_user(p, arg2, 0);
9830         return ret;
9831 #endif
9832 #ifdef TARGET_NR_rmdir
9833     case TARGET_NR_rmdir:
9834         if (!(p = lock_user_string(arg1)))
9835             return -TARGET_EFAULT;
9836         ret = get_errno(rmdir(p));
9837         unlock_user(p, arg1, 0);
9838         return ret;
9839 #endif
9840     case TARGET_NR_dup:
9841         ret = get_errno(dup(arg1));
9842         if (ret >= 0) {
9843             fd_trans_dup(arg1, ret);
9844         }
9845         return ret;
9846 #ifdef TARGET_NR_pipe
9847     case TARGET_NR_pipe:
9848         return do_pipe(cpu_env, arg1, 0, 0);
9849 #endif
9850 #ifdef TARGET_NR_pipe2
9851     case TARGET_NR_pipe2:
9852         return do_pipe(cpu_env, arg1,
9853                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9854 #endif
9855     case TARGET_NR_times:
9856         {
9857             struct target_tms *tmsp;
9858             struct tms tms;
9859             ret = get_errno(times(&tms));
9860             if (arg1) {
9861                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9862                 if (!tmsp)
9863                     return -TARGET_EFAULT;
9864                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9865                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9866                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9867                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9868             }
9869             if (!is_error(ret))
9870                 ret = host_to_target_clock_t(ret);
9871         }
9872         return ret;
9873     case TARGET_NR_acct:
9874         if (arg1 == 0) {
9875             ret = get_errno(acct(NULL));
9876         } else {
9877             if (!(p = lock_user_string(arg1))) {
9878                 return -TARGET_EFAULT;
9879             }
9880             ret = get_errno(acct(path(p)));
9881             unlock_user(p, arg1, 0);
9882         }
9883         return ret;
9884 #ifdef TARGET_NR_umount2
9885     case TARGET_NR_umount2:
9886         if (!(p = lock_user_string(arg1)))
9887             return -TARGET_EFAULT;
9888         ret = get_errno(umount2(p, arg2));
9889         unlock_user(p, arg1, 0);
9890         return ret;
9891 #endif
9892     case TARGET_NR_ioctl:
9893         return do_ioctl(arg1, arg2, arg3);
9894 #ifdef TARGET_NR_fcntl
9895     case TARGET_NR_fcntl:
9896         return do_fcntl(arg1, arg2, arg3);
9897 #endif
9898     case TARGET_NR_setpgid:
9899         return get_errno(setpgid(arg1, arg2));
9900     case TARGET_NR_umask:
9901         return get_errno(umask(arg1));
9902     case TARGET_NR_chroot:
9903         if (!(p = lock_user_string(arg1)))
9904             return -TARGET_EFAULT;
9905         ret = get_errno(chroot(p));
9906         unlock_user(p, arg1, 0);
9907         return ret;
9908 #ifdef TARGET_NR_dup2
9909     case TARGET_NR_dup2:
9910         ret = get_errno(dup2(arg1, arg2));
9911         if (ret >= 0) {
9912             fd_trans_dup(arg1, arg2);
9913         }
9914         return ret;
9915 #endif
9916 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9917     case TARGET_NR_dup3:
9918     {
9919         int host_flags;
9920 
9921         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9922             return -EINVAL;
9923         }
9924         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9925         ret = get_errno(dup3(arg1, arg2, host_flags));
9926         if (ret >= 0) {
9927             fd_trans_dup(arg1, arg2);
9928         }
9929         return ret;
9930     }
9931 #endif
9932 #ifdef TARGET_NR_getppid /* not on alpha */
9933     case TARGET_NR_getppid:
9934         return get_errno(getppid());
9935 #endif
9936 #ifdef TARGET_NR_getpgrp
9937     case TARGET_NR_getpgrp:
9938         return get_errno(getpgrp());
9939 #endif
9940     case TARGET_NR_setsid:
9941         return get_errno(setsid());
9942 #ifdef TARGET_NR_sigaction
9943     case TARGET_NR_sigaction:
9944         {
9945 #if defined(TARGET_MIPS)
9946 	    struct target_sigaction act, oact, *pact, *old_act;
9947 
9948 	    if (arg2) {
9949                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9950                     return -TARGET_EFAULT;
9951 		act._sa_handler = old_act->_sa_handler;
9952 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9953 		act.sa_flags = old_act->sa_flags;
9954 		unlock_user_struct(old_act, arg2, 0);
9955 		pact = &act;
9956 	    } else {
9957 		pact = NULL;
9958 	    }
9959 
9960         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9961 
9962 	    if (!is_error(ret) && arg3) {
9963                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9964                     return -TARGET_EFAULT;
9965 		old_act->_sa_handler = oact._sa_handler;
9966 		old_act->sa_flags = oact.sa_flags;
9967 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9968 		old_act->sa_mask.sig[1] = 0;
9969 		old_act->sa_mask.sig[2] = 0;
9970 		old_act->sa_mask.sig[3] = 0;
9971 		unlock_user_struct(old_act, arg3, 1);
9972 	    }
9973 #else
9974             struct target_old_sigaction *old_act;
9975             struct target_sigaction act, oact, *pact;
9976             if (arg2) {
9977                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9978                     return -TARGET_EFAULT;
9979                 act._sa_handler = old_act->_sa_handler;
9980                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9981                 act.sa_flags = old_act->sa_flags;
9982 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9983                 act.sa_restorer = old_act->sa_restorer;
9984 #endif
9985                 unlock_user_struct(old_act, arg2, 0);
9986                 pact = &act;
9987             } else {
9988                 pact = NULL;
9989             }
9990             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9991             if (!is_error(ret) && arg3) {
9992                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9993                     return -TARGET_EFAULT;
9994                 old_act->_sa_handler = oact._sa_handler;
9995                 old_act->sa_mask = oact.sa_mask.sig[0];
9996                 old_act->sa_flags = oact.sa_flags;
9997 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9998                 old_act->sa_restorer = oact.sa_restorer;
9999 #endif
10000                 unlock_user_struct(old_act, arg3, 1);
10001             }
10002 #endif
10003         }
10004         return ret;
10005 #endif
10006     case TARGET_NR_rt_sigaction:
10007         {
10008             /*
10009              * For Alpha and SPARC this is a 5 argument syscall, with
10010              * a 'restorer' parameter which must be copied into the
10011              * sa_restorer field of the sigaction struct.
10012              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10013              * and arg5 is the sigsetsize.
10014              */
10015 #if defined(TARGET_ALPHA)
10016             target_ulong sigsetsize = arg4;
10017             target_ulong restorer = arg5;
10018 #elif defined(TARGET_SPARC)
10019             target_ulong restorer = arg4;
10020             target_ulong sigsetsize = arg5;
10021 #else
10022             target_ulong sigsetsize = arg4;
10023             target_ulong restorer = 0;
10024 #endif
10025             struct target_sigaction *act = NULL;
10026             struct target_sigaction *oact = NULL;
10027 
10028             if (sigsetsize != sizeof(target_sigset_t)) {
10029                 return -TARGET_EINVAL;
10030             }
10031             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10032                 return -TARGET_EFAULT;
10033             }
10034             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10035                 ret = -TARGET_EFAULT;
10036             } else {
10037                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10038                 if (oact) {
10039                     unlock_user_struct(oact, arg3, 1);
10040                 }
10041             }
10042             if (act) {
10043                 unlock_user_struct(act, arg2, 0);
10044             }
10045         }
10046         return ret;
10047 #ifdef TARGET_NR_sgetmask /* not on alpha */
10048     case TARGET_NR_sgetmask:
10049         {
10050             sigset_t cur_set;
10051             abi_ulong target_set;
10052             ret = do_sigprocmask(0, NULL, &cur_set);
10053             if (!ret) {
10054                 host_to_target_old_sigset(&target_set, &cur_set);
10055                 ret = target_set;
10056             }
10057         }
10058         return ret;
10059 #endif
10060 #ifdef TARGET_NR_ssetmask /* not on alpha */
10061     case TARGET_NR_ssetmask:
10062         {
10063             sigset_t set, oset;
10064             abi_ulong target_set = arg1;
10065             target_to_host_old_sigset(&set, &target_set);
10066             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10067             if (!ret) {
10068                 host_to_target_old_sigset(&target_set, &oset);
10069                 ret = target_set;
10070             }
10071         }
10072         return ret;
10073 #endif
10074 #ifdef TARGET_NR_sigprocmask
10075     case TARGET_NR_sigprocmask:
10076         {
10077 #if defined(TARGET_ALPHA)
10078             sigset_t set, oldset;
10079             abi_ulong mask;
10080             int how;
10081 
10082             switch (arg1) {
10083             case TARGET_SIG_BLOCK:
10084                 how = SIG_BLOCK;
10085                 break;
10086             case TARGET_SIG_UNBLOCK:
10087                 how = SIG_UNBLOCK;
10088                 break;
10089             case TARGET_SIG_SETMASK:
10090                 how = SIG_SETMASK;
10091                 break;
10092             default:
10093                 return -TARGET_EINVAL;
10094             }
10095             mask = arg2;
10096             target_to_host_old_sigset(&set, &mask);
10097 
10098             ret = do_sigprocmask(how, &set, &oldset);
10099             if (!is_error(ret)) {
10100                 host_to_target_old_sigset(&mask, &oldset);
10101                 ret = mask;
10102                 cpu_env->ir[IR_V0] = 0; /* force no error */
10103             }
10104 #else
10105             sigset_t set, oldset, *set_ptr;
10106             int how;
10107 
10108             if (arg2) {
10109                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10110                 if (!p) {
10111                     return -TARGET_EFAULT;
10112                 }
10113                 target_to_host_old_sigset(&set, p);
10114                 unlock_user(p, arg2, 0);
10115                 set_ptr = &set;
10116                 switch (arg1) {
10117                 case TARGET_SIG_BLOCK:
10118                     how = SIG_BLOCK;
10119                     break;
10120                 case TARGET_SIG_UNBLOCK:
10121                     how = SIG_UNBLOCK;
10122                     break;
10123                 case TARGET_SIG_SETMASK:
10124                     how = SIG_SETMASK;
10125                     break;
10126                 default:
10127                     return -TARGET_EINVAL;
10128                 }
10129             } else {
10130                 how = 0;
10131                 set_ptr = NULL;
10132             }
10133             ret = do_sigprocmask(how, set_ptr, &oldset);
10134             if (!is_error(ret) && arg3) {
10135                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10136                     return -TARGET_EFAULT;
10137                 host_to_target_old_sigset(p, &oldset);
10138                 unlock_user(p, arg3, sizeof(target_sigset_t));
10139             }
10140 #endif
10141         }
10142         return ret;
10143 #endif
10144     case TARGET_NR_rt_sigprocmask:
10145         {
10146             int how = arg1;
10147             sigset_t set, oldset, *set_ptr;
10148 
10149             if (arg4 != sizeof(target_sigset_t)) {
10150                 return -TARGET_EINVAL;
10151             }
10152 
10153             if (arg2) {
10154                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10155                 if (!p) {
10156                     return -TARGET_EFAULT;
10157                 }
10158                 target_to_host_sigset(&set, p);
10159                 unlock_user(p, arg2, 0);
10160                 set_ptr = &set;
10161                 switch(how) {
10162                 case TARGET_SIG_BLOCK:
10163                     how = SIG_BLOCK;
10164                     break;
10165                 case TARGET_SIG_UNBLOCK:
10166                     how = SIG_UNBLOCK;
10167                     break;
10168                 case TARGET_SIG_SETMASK:
10169                     how = SIG_SETMASK;
10170                     break;
10171                 default:
10172                     return -TARGET_EINVAL;
10173                 }
10174             } else {
10175                 how = 0;
10176                 set_ptr = NULL;
10177             }
10178             ret = do_sigprocmask(how, set_ptr, &oldset);
10179             if (!is_error(ret) && arg3) {
10180                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10181                     return -TARGET_EFAULT;
10182                 host_to_target_sigset(p, &oldset);
10183                 unlock_user(p, arg3, sizeof(target_sigset_t));
10184             }
10185         }
10186         return ret;
10187 #ifdef TARGET_NR_sigpending
10188     case TARGET_NR_sigpending:
10189         {
10190             sigset_t set;
10191             ret = get_errno(sigpending(&set));
10192             if (!is_error(ret)) {
10193                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10194                     return -TARGET_EFAULT;
10195                 host_to_target_old_sigset(p, &set);
10196                 unlock_user(p, arg1, sizeof(target_sigset_t));
10197             }
10198         }
10199         return ret;
10200 #endif
10201     case TARGET_NR_rt_sigpending:
10202         {
10203             sigset_t set;
10204 
10205             /* Yes, this check is >, not != like most. We follow the kernel's
10206              * logic and it does it like this because it implements
10207              * NR_sigpending through the same code path, and in that case
10208              * the old_sigset_t is smaller in size.
10209              */
10210             if (arg2 > sizeof(target_sigset_t)) {
10211                 return -TARGET_EINVAL;
10212             }
10213 
10214             ret = get_errno(sigpending(&set));
10215             if (!is_error(ret)) {
10216                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10217                     return -TARGET_EFAULT;
10218                 host_to_target_sigset(p, &set);
10219                 unlock_user(p, arg1, sizeof(target_sigset_t));
10220             }
10221         }
10222         return ret;
10223 #ifdef TARGET_NR_sigsuspend
10224     case TARGET_NR_sigsuspend:
10225         {
10226             sigset_t *set;
10227 
10228 #if defined(TARGET_ALPHA)
10229             TaskState *ts = get_task_state(cpu);
10230             /* target_to_host_old_sigset will bswap back */
10231             abi_ulong mask = tswapal(arg1);
10232             set = &ts->sigsuspend_mask;
10233             target_to_host_old_sigset(set, &mask);
10234 #else
10235             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10236             if (ret != 0) {
10237                 return ret;
10238             }
10239 #endif
10240             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10241             finish_sigsuspend_mask(ret);
10242         }
10243         return ret;
10244 #endif
10245     case TARGET_NR_rt_sigsuspend:
10246         {
10247             sigset_t *set;
10248 
10249             ret = process_sigsuspend_mask(&set, arg1, arg2);
10250             if (ret != 0) {
10251                 return ret;
10252             }
10253             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10254             finish_sigsuspend_mask(ret);
10255         }
10256         return ret;
10257 #ifdef TARGET_NR_rt_sigtimedwait
10258     case TARGET_NR_rt_sigtimedwait:
10259         {
10260             sigset_t set;
10261             struct timespec uts, *puts;
10262             siginfo_t uinfo;
10263 
10264             if (arg4 != sizeof(target_sigset_t)) {
10265                 return -TARGET_EINVAL;
10266             }
10267 
10268             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10269                 return -TARGET_EFAULT;
10270             target_to_host_sigset(&set, p);
10271             unlock_user(p, arg1, 0);
10272             if (arg3) {
10273                 puts = &uts;
10274                 if (target_to_host_timespec(puts, arg3)) {
10275                     return -TARGET_EFAULT;
10276                 }
10277             } else {
10278                 puts = NULL;
10279             }
10280             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10281                                                  SIGSET_T_SIZE));
10282             if (!is_error(ret)) {
10283                 if (arg2) {
10284                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10285                                   0);
10286                     if (!p) {
10287                         return -TARGET_EFAULT;
10288                     }
10289                     host_to_target_siginfo(p, &uinfo);
10290                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10291                 }
10292                 ret = host_to_target_signal(ret);
10293             }
10294         }
10295         return ret;
10296 #endif
10297 #ifdef TARGET_NR_rt_sigtimedwait_time64
10298     case TARGET_NR_rt_sigtimedwait_time64:
10299         {
10300             sigset_t set;
10301             struct timespec uts, *puts;
10302             siginfo_t uinfo;
10303 
10304             if (arg4 != sizeof(target_sigset_t)) {
10305                 return -TARGET_EINVAL;
10306             }
10307 
10308             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10309             if (!p) {
10310                 return -TARGET_EFAULT;
10311             }
10312             target_to_host_sigset(&set, p);
10313             unlock_user(p, arg1, 0);
10314             if (arg3) {
10315                 puts = &uts;
10316                 if (target_to_host_timespec64(puts, arg3)) {
10317                     return -TARGET_EFAULT;
10318                 }
10319             } else {
10320                 puts = NULL;
10321             }
10322             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10323                                                  SIGSET_T_SIZE));
10324             if (!is_error(ret)) {
10325                 if (arg2) {
10326                     p = lock_user(VERIFY_WRITE, arg2,
10327                                   sizeof(target_siginfo_t), 0);
10328                     if (!p) {
10329                         return -TARGET_EFAULT;
10330                     }
10331                     host_to_target_siginfo(p, &uinfo);
10332                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10333                 }
10334                 ret = host_to_target_signal(ret);
10335             }
10336         }
10337         return ret;
10338 #endif
10339     case TARGET_NR_rt_sigqueueinfo:
10340         {
10341             siginfo_t uinfo;
10342 
10343             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10344             if (!p) {
10345                 return -TARGET_EFAULT;
10346             }
10347             target_to_host_siginfo(&uinfo, p);
10348             unlock_user(p, arg3, 0);
10349             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10350         }
10351         return ret;
10352     case TARGET_NR_rt_tgsigqueueinfo:
10353         {
10354             siginfo_t uinfo;
10355 
10356             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10357             if (!p) {
10358                 return -TARGET_EFAULT;
10359             }
10360             target_to_host_siginfo(&uinfo, p);
10361             unlock_user(p, arg4, 0);
10362             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10363         }
10364         return ret;
10365 #ifdef TARGET_NR_sigreturn
10366     case TARGET_NR_sigreturn:
10367         if (block_signals()) {
10368             return -QEMU_ERESTARTSYS;
10369         }
10370         return do_sigreturn(cpu_env);
10371 #endif
10372     case TARGET_NR_rt_sigreturn:
10373         if (block_signals()) {
10374             return -QEMU_ERESTARTSYS;
10375         }
10376         return do_rt_sigreturn(cpu_env);
10377     case TARGET_NR_sethostname:
10378         if (!(p = lock_user_string(arg1)))
10379             return -TARGET_EFAULT;
10380         ret = get_errno(sethostname(p, arg2));
10381         unlock_user(p, arg1, 0);
10382         return ret;
10383 #ifdef TARGET_NR_setrlimit
10384     case TARGET_NR_setrlimit:
10385         {
10386             int resource = target_to_host_resource(arg1);
10387             struct target_rlimit *target_rlim;
10388             struct rlimit rlim;
10389             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10390                 return -TARGET_EFAULT;
10391             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10392             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10393             unlock_user_struct(target_rlim, arg2, 0);
10394             /*
10395              * If we just passed through resource limit settings for memory then
10396              * they would also apply to QEMU's own allocations, and QEMU will
10397              * crash or hang or die if its allocations fail. Ideally we would
10398              * track the guest allocations in QEMU and apply the limits ourselves.
10399              * For now, just tell the guest the call succeeded but don't actually
10400              * limit anything.
10401              */
10402             if (resource != RLIMIT_AS &&
10403                 resource != RLIMIT_DATA &&
10404                 resource != RLIMIT_STACK) {
10405                 return get_errno(setrlimit(resource, &rlim));
10406             } else {
10407                 return 0;
10408             }
10409         }
10410 #endif
10411 #ifdef TARGET_NR_getrlimit
10412     case TARGET_NR_getrlimit:
10413         {
10414             int resource = target_to_host_resource(arg1);
10415             struct target_rlimit *target_rlim;
10416             struct rlimit rlim;
10417 
10418             ret = get_errno(getrlimit(resource, &rlim));
10419             if (!is_error(ret)) {
10420                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10421                     return -TARGET_EFAULT;
10422                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10423                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10424                 unlock_user_struct(target_rlim, arg2, 1);
10425             }
10426         }
10427         return ret;
10428 #endif
10429     case TARGET_NR_getrusage:
10430         {
10431             struct rusage rusage;
10432             ret = get_errno(getrusage(arg1, &rusage));
10433             if (!is_error(ret)) {
10434                 ret = host_to_target_rusage(arg2, &rusage);
10435             }
10436         }
10437         return ret;
10438 #if defined(TARGET_NR_gettimeofday)
10439     case TARGET_NR_gettimeofday:
10440         {
10441             struct timeval tv;
10442             struct timezone tz;
10443 
10444             ret = get_errno(gettimeofday(&tv, &tz));
10445             if (!is_error(ret)) {
10446                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10447                     return -TARGET_EFAULT;
10448                 }
10449                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10450                     return -TARGET_EFAULT;
10451                 }
10452             }
10453         }
10454         return ret;
10455 #endif
10456 #if defined(TARGET_NR_settimeofday)
10457     case TARGET_NR_settimeofday:
10458         {
10459             struct timeval tv, *ptv = NULL;
10460             struct timezone tz, *ptz = NULL;
10461 
10462             if (arg1) {
10463                 if (copy_from_user_timeval(&tv, arg1)) {
10464                     return -TARGET_EFAULT;
10465                 }
10466                 ptv = &tv;
10467             }
10468 
10469             if (arg2) {
10470                 if (copy_from_user_timezone(&tz, arg2)) {
10471                     return -TARGET_EFAULT;
10472                 }
10473                 ptz = &tz;
10474             }
10475 
10476             return get_errno(settimeofday(ptv, ptz));
10477         }
10478 #endif
10479 #if defined(TARGET_NR_select)
10480     case TARGET_NR_select:
10481 #if defined(TARGET_WANT_NI_OLD_SELECT)
10482         /* some architectures used to have old_select here
10483          * but now ENOSYS it.
10484          */
10485         ret = -TARGET_ENOSYS;
10486 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10487         ret = do_old_select(arg1);
10488 #else
10489         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10490 #endif
10491         return ret;
10492 #endif
10493 #ifdef TARGET_NR_pselect6
10494     case TARGET_NR_pselect6:
10495         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10496 #endif
10497 #ifdef TARGET_NR_pselect6_time64
10498     case TARGET_NR_pselect6_time64:
10499         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10500 #endif
10501 #ifdef TARGET_NR_symlink
10502     case TARGET_NR_symlink:
10503         {
10504             void *p2;
10505             p = lock_user_string(arg1);
10506             p2 = lock_user_string(arg2);
10507             if (!p || !p2)
10508                 ret = -TARGET_EFAULT;
10509             else
10510                 ret = get_errno(symlink(p, p2));
10511             unlock_user(p2, arg2, 0);
10512             unlock_user(p, arg1, 0);
10513         }
10514         return ret;
10515 #endif
10516 #if defined(TARGET_NR_symlinkat)
10517     case TARGET_NR_symlinkat:
10518         {
10519             void *p2;
10520             p  = lock_user_string(arg1);
10521             p2 = lock_user_string(arg3);
10522             if (!p || !p2)
10523                 ret = -TARGET_EFAULT;
10524             else
10525                 ret = get_errno(symlinkat(p, arg2, p2));
10526             unlock_user(p2, arg3, 0);
10527             unlock_user(p, arg1, 0);
10528         }
10529         return ret;
10530 #endif
10531 #ifdef TARGET_NR_readlink
10532     case TARGET_NR_readlink:
10533         {
10534             void *p2;
10535             p = lock_user_string(arg1);
10536             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10537             ret = get_errno(do_guest_readlink(p, p2, arg3));
10538             unlock_user(p2, arg2, ret);
10539             unlock_user(p, arg1, 0);
10540         }
10541         return ret;
10542 #endif
10543 #if defined(TARGET_NR_readlinkat)
10544     case TARGET_NR_readlinkat:
10545         {
10546             void *p2;
10547             p  = lock_user_string(arg2);
10548             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10549             if (!p || !p2) {
10550                 ret = -TARGET_EFAULT;
10551             } else if (!arg4) {
10552                 /* Short circuit this for the magic exe check. */
10553                 ret = -TARGET_EINVAL;
10554             } else if (is_proc_myself((const char *)p, "exe")) {
10555                 /*
10556                  * Don't worry about sign mismatch as earlier mapping
10557                  * logic would have thrown a bad address error.
10558                  */
10559                 ret = MIN(strlen(exec_path), arg4);
10560                 /* We cannot NUL terminate the string. */
10561                 memcpy(p2, exec_path, ret);
10562             } else {
10563                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10564             }
10565             unlock_user(p2, arg3, ret);
10566             unlock_user(p, arg2, 0);
10567         }
10568         return ret;
10569 #endif
10570 #ifdef TARGET_NR_swapon
10571     case TARGET_NR_swapon:
10572         if (!(p = lock_user_string(arg1)))
10573             return -TARGET_EFAULT;
10574         ret = get_errno(swapon(p, arg2));
10575         unlock_user(p, arg1, 0);
10576         return ret;
10577 #endif
10578     case TARGET_NR_reboot:
10579         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10580            /* arg4 must be ignored in all other cases */
10581            p = lock_user_string(arg4);
10582            if (!p) {
10583                return -TARGET_EFAULT;
10584            }
10585            ret = get_errno(reboot(arg1, arg2, arg3, p));
10586            unlock_user(p, arg4, 0);
10587         } else {
10588            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10589         }
10590         return ret;
10591 #ifdef TARGET_NR_mmap
10592     case TARGET_NR_mmap:
10593 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10594         {
10595             abi_ulong *v;
10596             abi_ulong v1, v2, v3, v4, v5, v6;
10597             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10598                 return -TARGET_EFAULT;
10599             v1 = tswapal(v[0]);
10600             v2 = tswapal(v[1]);
10601             v3 = tswapal(v[2]);
10602             v4 = tswapal(v[3]);
10603             v5 = tswapal(v[4]);
10604             v6 = tswapal(v[5]);
10605             unlock_user(v, arg1, 0);
10606             return do_mmap(v1, v2, v3, v4, v5, v6);
10607         }
10608 #else
10609         /* mmap pointers are always untagged */
10610         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10611 #endif
10612 #endif
10613 #ifdef TARGET_NR_mmap2
10614     case TARGET_NR_mmap2:
10615 #ifndef MMAP_SHIFT
10616 #define MMAP_SHIFT 12
10617 #endif
10618         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10619                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10620 #endif
10621     case TARGET_NR_munmap:
10622         arg1 = cpu_untagged_addr(cpu, arg1);
10623         return get_errno(target_munmap(arg1, arg2));
10624     case TARGET_NR_mprotect:
10625         arg1 = cpu_untagged_addr(cpu, arg1);
10626         {
10627             TaskState *ts = get_task_state(cpu);
10628             /* Special hack to detect libc making the stack executable.  */
10629             if ((arg3 & PROT_GROWSDOWN)
10630                 && arg1 >= ts->info->stack_limit
10631                 && arg1 <= ts->info->start_stack) {
10632                 arg3 &= ~PROT_GROWSDOWN;
10633                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10634                 arg1 = ts->info->stack_limit;
10635             }
10636         }
10637         return get_errno(target_mprotect(arg1, arg2, arg3));
10638 #ifdef TARGET_NR_mremap
10639     case TARGET_NR_mremap:
10640         arg1 = cpu_untagged_addr(cpu, arg1);
10641         /* mremap new_addr (arg5) is always untagged */
10642         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10643 #endif
10644         /* ??? msync/mlock/munlock are broken for softmmu.  */
10645 #ifdef TARGET_NR_msync
10646     case TARGET_NR_msync:
10647         return get_errno(msync(g2h(cpu, arg1), arg2,
10648                                target_to_host_msync_arg(arg3)));
10649 #endif
10650 #ifdef TARGET_NR_mlock
10651     case TARGET_NR_mlock:
10652         return get_errno(mlock(g2h(cpu, arg1), arg2));
10653 #endif
10654 #ifdef TARGET_NR_munlock
10655     case TARGET_NR_munlock:
10656         return get_errno(munlock(g2h(cpu, arg1), arg2));
10657 #endif
10658 #ifdef TARGET_NR_mlockall
10659     case TARGET_NR_mlockall:
10660         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10661 #endif
10662 #ifdef TARGET_NR_munlockall
10663     case TARGET_NR_munlockall:
10664         return get_errno(munlockall());
10665 #endif
10666 #ifdef TARGET_NR_truncate
10667     case TARGET_NR_truncate:
10668         if (!(p = lock_user_string(arg1)))
10669             return -TARGET_EFAULT;
10670         ret = get_errno(truncate(p, arg2));
10671         unlock_user(p, arg1, 0);
10672         return ret;
10673 #endif
10674 #ifdef TARGET_NR_ftruncate
10675     case TARGET_NR_ftruncate:
10676         return get_errno(ftruncate(arg1, arg2));
10677 #endif
10678     case TARGET_NR_fchmod:
10679         return get_errno(fchmod(arg1, arg2));
10680 #if defined(TARGET_NR_fchmodat)
10681     case TARGET_NR_fchmodat:
10682         if (!(p = lock_user_string(arg2)))
10683             return -TARGET_EFAULT;
10684         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10685         unlock_user(p, arg2, 0);
10686         return ret;
10687 #endif
10688     case TARGET_NR_getpriority:
10689         /* Note that negative values are valid for getpriority, so we must
10690            differentiate based on errno settings.  */
10691         errno = 0;
10692         ret = getpriority(arg1, arg2);
10693         if (ret == -1 && errno != 0) {
10694             return -host_to_target_errno(errno);
10695         }
10696 #ifdef TARGET_ALPHA
10697         /* Return value is the unbiased priority.  Signal no error.  */
10698         cpu_env->ir[IR_V0] = 0;
10699 #else
10700         /* Return value is a biased priority to avoid negative numbers.  */
10701         ret = 20 - ret;
10702 #endif
10703         return ret;
10704     case TARGET_NR_setpriority:
10705         return get_errno(setpriority(arg1, arg2, arg3));
10706 #ifdef TARGET_NR_statfs
10707     case TARGET_NR_statfs:
10708         if (!(p = lock_user_string(arg1))) {
10709             return -TARGET_EFAULT;
10710         }
10711         ret = get_errno(statfs(path(p), &stfs));
10712         unlock_user(p, arg1, 0);
10713     convert_statfs:
10714         if (!is_error(ret)) {
10715             struct target_statfs *target_stfs;
10716 
10717             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10718                 return -TARGET_EFAULT;
10719             __put_user(stfs.f_type, &target_stfs->f_type);
10720             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10721             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10722             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10723             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10724             __put_user(stfs.f_files, &target_stfs->f_files);
10725             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10726             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10727             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10728             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10729             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10730 #ifdef _STATFS_F_FLAGS
10731             __put_user(stfs.f_flags, &target_stfs->f_flags);
10732 #else
10733             __put_user(0, &target_stfs->f_flags);
10734 #endif
10735             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10736             unlock_user_struct(target_stfs, arg2, 1);
10737         }
10738         return ret;
10739 #endif
10740 #ifdef TARGET_NR_fstatfs
10741     case TARGET_NR_fstatfs:
10742         ret = get_errno(fstatfs(arg1, &stfs));
10743         goto convert_statfs;
10744 #endif
10745 #ifdef TARGET_NR_statfs64
10746     case TARGET_NR_statfs64:
10747         if (!(p = lock_user_string(arg1))) {
10748             return -TARGET_EFAULT;
10749         }
10750         ret = get_errno(statfs(path(p), &stfs));
10751         unlock_user(p, arg1, 0);
10752     convert_statfs64:
10753         if (!is_error(ret)) {
10754             struct target_statfs64 *target_stfs;
10755 
10756             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10757                 return -TARGET_EFAULT;
10758             __put_user(stfs.f_type, &target_stfs->f_type);
10759             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10760             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10761             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10762             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10763             __put_user(stfs.f_files, &target_stfs->f_files);
10764             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10765             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10766             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10767             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10768             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10769 #ifdef _STATFS_F_FLAGS
10770             __put_user(stfs.f_flags, &target_stfs->f_flags);
10771 #else
10772             __put_user(0, &target_stfs->f_flags);
10773 #endif
10774             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10775             unlock_user_struct(target_stfs, arg3, 1);
10776         }
10777         return ret;
10778     case TARGET_NR_fstatfs64:
10779         ret = get_errno(fstatfs(arg1, &stfs));
10780         goto convert_statfs64;
10781 #endif
10782 #ifdef TARGET_NR_socketcall
10783     case TARGET_NR_socketcall:
10784         return do_socketcall(arg1, arg2);
10785 #endif
10786 #ifdef TARGET_NR_accept
10787     case TARGET_NR_accept:
10788         return do_accept4(arg1, arg2, arg3, 0);
10789 #endif
10790 #ifdef TARGET_NR_accept4
10791     case TARGET_NR_accept4:
10792         return do_accept4(arg1, arg2, arg3, arg4);
10793 #endif
10794 #ifdef TARGET_NR_bind
10795     case TARGET_NR_bind:
10796         return do_bind(arg1, arg2, arg3);
10797 #endif
10798 #ifdef TARGET_NR_connect
10799     case TARGET_NR_connect:
10800         return do_connect(arg1, arg2, arg3);
10801 #endif
10802 #ifdef TARGET_NR_getpeername
10803     case TARGET_NR_getpeername:
10804         return do_getpeername(arg1, arg2, arg3);
10805 #endif
10806 #ifdef TARGET_NR_getsockname
10807     case TARGET_NR_getsockname:
10808         return do_getsockname(arg1, arg2, arg3);
10809 #endif
10810 #ifdef TARGET_NR_getsockopt
10811     case TARGET_NR_getsockopt:
10812         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10813 #endif
10814 #ifdef TARGET_NR_listen
10815     case TARGET_NR_listen:
10816         return get_errno(listen(arg1, arg2));
10817 #endif
10818 #ifdef TARGET_NR_recv
10819     case TARGET_NR_recv:
10820         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10821 #endif
10822 #ifdef TARGET_NR_recvfrom
10823     case TARGET_NR_recvfrom:
10824         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10825 #endif
10826 #ifdef TARGET_NR_recvmsg
10827     case TARGET_NR_recvmsg:
10828         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10829 #endif
10830 #ifdef TARGET_NR_send
10831     case TARGET_NR_send:
10832         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10833 #endif
10834 #ifdef TARGET_NR_sendmsg
10835     case TARGET_NR_sendmsg:
10836         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10837 #endif
10838 #ifdef TARGET_NR_sendmmsg
10839     case TARGET_NR_sendmmsg:
10840         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10841 #endif
10842 #ifdef TARGET_NR_recvmmsg
10843     case TARGET_NR_recvmmsg:
10844         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10845 #endif
10846 #ifdef TARGET_NR_sendto
10847     case TARGET_NR_sendto:
10848         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10849 #endif
10850 #ifdef TARGET_NR_shutdown
10851     case TARGET_NR_shutdown:
10852         return get_errno(shutdown(arg1, arg2));
10853 #endif
10854 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10855     case TARGET_NR_getrandom:
10856         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10857         if (!p) {
10858             return -TARGET_EFAULT;
10859         }
10860         ret = get_errno(getrandom(p, arg2, arg3));
10861         unlock_user(p, arg1, ret);
10862         return ret;
10863 #endif
10864 #ifdef TARGET_NR_socket
10865     case TARGET_NR_socket:
10866         return do_socket(arg1, arg2, arg3);
10867 #endif
10868 #ifdef TARGET_NR_socketpair
10869     case TARGET_NR_socketpair:
10870         return do_socketpair(arg1, arg2, arg3, arg4);
10871 #endif
10872 #ifdef TARGET_NR_setsockopt
10873     case TARGET_NR_setsockopt:
10874         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10875 #endif
10876 #if defined(TARGET_NR_syslog)
10877     case TARGET_NR_syslog:
10878         {
10879             int len = arg2;
10880 
10881             switch (arg1) {
10882             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10883             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10884             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10885             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10886             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10887             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10888             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10889             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10890                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10891             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10892             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10893             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10894                 {
10895                     if (len < 0) {
10896                         return -TARGET_EINVAL;
10897                     }
10898                     if (len == 0) {
10899                         return 0;
10900                     }
10901                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10902                     if (!p) {
10903                         return -TARGET_EFAULT;
10904                     }
10905                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10906                     unlock_user(p, arg2, arg3);
10907                 }
10908                 return ret;
10909             default:
10910                 return -TARGET_EINVAL;
10911             }
10912         }
10913         break;
10914 #endif
10915     case TARGET_NR_setitimer:
10916         {
10917             struct itimerval value, ovalue, *pvalue;
10918 
10919             if (arg2) {
10920                 pvalue = &value;
10921                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10922                     || copy_from_user_timeval(&pvalue->it_value,
10923                                               arg2 + sizeof(struct target_timeval)))
10924                     return -TARGET_EFAULT;
10925             } else {
10926                 pvalue = NULL;
10927             }
10928             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10929             if (!is_error(ret) && arg3) {
10930                 if (copy_to_user_timeval(arg3,
10931                                          &ovalue.it_interval)
10932                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10933                                             &ovalue.it_value))
10934                     return -TARGET_EFAULT;
10935             }
10936         }
10937         return ret;
10938     case TARGET_NR_getitimer:
10939         {
10940             struct itimerval value;
10941 
10942             ret = get_errno(getitimer(arg1, &value));
10943             if (!is_error(ret) && arg2) {
10944                 if (copy_to_user_timeval(arg2,
10945                                          &value.it_interval)
10946                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10947                                             &value.it_value))
10948                     return -TARGET_EFAULT;
10949             }
10950         }
10951         return ret;
10952 #ifdef TARGET_NR_stat
10953     case TARGET_NR_stat:
10954         if (!(p = lock_user_string(arg1))) {
10955             return -TARGET_EFAULT;
10956         }
10957         ret = get_errno(stat(path(p), &st));
10958         unlock_user(p, arg1, 0);
10959         goto do_stat;
10960 #endif
10961 #ifdef TARGET_NR_lstat
10962     case TARGET_NR_lstat:
10963         if (!(p = lock_user_string(arg1))) {
10964             return -TARGET_EFAULT;
10965         }
10966         ret = get_errno(lstat(path(p), &st));
10967         unlock_user(p, arg1, 0);
10968         goto do_stat;
10969 #endif
10970 #ifdef TARGET_NR_fstat
10971     case TARGET_NR_fstat:
10972         {
10973             ret = get_errno(fstat(arg1, &st));
10974 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10975         do_stat:
10976 #endif
10977             if (!is_error(ret)) {
10978                 struct target_stat *target_st;
10979 
10980                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10981                     return -TARGET_EFAULT;
10982                 memset(target_st, 0, sizeof(*target_st));
10983                 __put_user(st.st_dev, &target_st->st_dev);
10984                 __put_user(st.st_ino, &target_st->st_ino);
10985                 __put_user(st.st_mode, &target_st->st_mode);
10986                 __put_user(st.st_uid, &target_st->st_uid);
10987                 __put_user(st.st_gid, &target_st->st_gid);
10988                 __put_user(st.st_nlink, &target_st->st_nlink);
10989                 __put_user(st.st_rdev, &target_st->st_rdev);
10990                 __put_user(st.st_size, &target_st->st_size);
10991                 __put_user(st.st_blksize, &target_st->st_blksize);
10992                 __put_user(st.st_blocks, &target_st->st_blocks);
10993                 __put_user(st.st_atime, &target_st->target_st_atime);
10994                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10995                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10996 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10997                 __put_user(st.st_atim.tv_nsec,
10998                            &target_st->target_st_atime_nsec);
10999                 __put_user(st.st_mtim.tv_nsec,
11000                            &target_st->target_st_mtime_nsec);
11001                 __put_user(st.st_ctim.tv_nsec,
11002                            &target_st->target_st_ctime_nsec);
11003 #endif
11004                 unlock_user_struct(target_st, arg2, 1);
11005             }
11006         }
11007         return ret;
11008 #endif
11009     case TARGET_NR_vhangup:
11010         return get_errno(vhangup());
11011 #ifdef TARGET_NR_syscall
11012     case TARGET_NR_syscall:
11013         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11014                           arg6, arg7, arg8, 0);
11015 #endif
11016 #if defined(TARGET_NR_wait4)
11017     case TARGET_NR_wait4:
11018         {
11019             int status;
11020             abi_long status_ptr = arg2;
11021             struct rusage rusage, *rusage_ptr;
11022             abi_ulong target_rusage = arg4;
11023             abi_long rusage_err;
11024             if (target_rusage)
11025                 rusage_ptr = &rusage;
11026             else
11027                 rusage_ptr = NULL;
11028             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11029             if (!is_error(ret)) {
11030                 if (status_ptr && ret) {
11031                     status = host_to_target_waitstatus(status);
11032                     if (put_user_s32(status, status_ptr))
11033                         return -TARGET_EFAULT;
11034                 }
11035                 if (target_rusage) {
11036                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11037                     if (rusage_err) {
11038                         ret = rusage_err;
11039                     }
11040                 }
11041             }
11042         }
11043         return ret;
11044 #endif
11045 #ifdef TARGET_NR_swapoff
11046     case TARGET_NR_swapoff:
11047         if (!(p = lock_user_string(arg1)))
11048             return -TARGET_EFAULT;
11049         ret = get_errno(swapoff(p));
11050         unlock_user(p, arg1, 0);
11051         return ret;
11052 #endif
11053     case TARGET_NR_sysinfo:
11054         {
11055             struct target_sysinfo *target_value;
11056             struct sysinfo value;
11057             ret = get_errno(sysinfo(&value));
11058             if (!is_error(ret) && arg1)
11059             {
11060                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11061                     return -TARGET_EFAULT;
11062                 __put_user(value.uptime, &target_value->uptime);
11063                 __put_user(value.loads[0], &target_value->loads[0]);
11064                 __put_user(value.loads[1], &target_value->loads[1]);
11065                 __put_user(value.loads[2], &target_value->loads[2]);
11066                 __put_user(value.totalram, &target_value->totalram);
11067                 __put_user(value.freeram, &target_value->freeram);
11068                 __put_user(value.sharedram, &target_value->sharedram);
11069                 __put_user(value.bufferram, &target_value->bufferram);
11070                 __put_user(value.totalswap, &target_value->totalswap);
11071                 __put_user(value.freeswap, &target_value->freeswap);
11072                 __put_user(value.procs, &target_value->procs);
11073                 __put_user(value.totalhigh, &target_value->totalhigh);
11074                 __put_user(value.freehigh, &target_value->freehigh);
11075                 __put_user(value.mem_unit, &target_value->mem_unit);
11076                 unlock_user_struct(target_value, arg1, 1);
11077             }
11078         }
11079         return ret;
11080 #ifdef TARGET_NR_ipc
11081     case TARGET_NR_ipc:
11082         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11083 #endif
11084 #ifdef TARGET_NR_semget
11085     case TARGET_NR_semget:
11086         return get_errno(semget(arg1, arg2, arg3));
11087 #endif
11088 #ifdef TARGET_NR_semop
11089     case TARGET_NR_semop:
11090         return do_semtimedop(arg1, arg2, arg3, 0, false);
11091 #endif
11092 #ifdef TARGET_NR_semtimedop
11093     case TARGET_NR_semtimedop:
11094         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11095 #endif
11096 #ifdef TARGET_NR_semtimedop_time64
11097     case TARGET_NR_semtimedop_time64:
11098         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11099 #endif
11100 #ifdef TARGET_NR_semctl
11101     case TARGET_NR_semctl:
11102         return do_semctl(arg1, arg2, arg3, arg4);
11103 #endif
11104 #ifdef TARGET_NR_msgctl
11105     case TARGET_NR_msgctl:
11106         return do_msgctl(arg1, arg2, arg3);
11107 #endif
11108 #ifdef TARGET_NR_msgget
11109     case TARGET_NR_msgget:
11110         return get_errno(msgget(arg1, arg2));
11111 #endif
11112 #ifdef TARGET_NR_msgrcv
11113     case TARGET_NR_msgrcv:
11114         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11115 #endif
11116 #ifdef TARGET_NR_msgsnd
11117     case TARGET_NR_msgsnd:
11118         return do_msgsnd(arg1, arg2, arg3, arg4);
11119 #endif
11120 #ifdef TARGET_NR_shmget
11121     case TARGET_NR_shmget:
11122         return get_errno(shmget(arg1, arg2, arg3));
11123 #endif
11124 #ifdef TARGET_NR_shmctl
11125     case TARGET_NR_shmctl:
11126         return do_shmctl(arg1, arg2, arg3);
11127 #endif
11128 #ifdef TARGET_NR_shmat
11129     case TARGET_NR_shmat:
11130         return target_shmat(cpu_env, arg1, arg2, arg3);
11131 #endif
11132 #ifdef TARGET_NR_shmdt
11133     case TARGET_NR_shmdt:
11134         return target_shmdt(arg1);
11135 #endif
11136     case TARGET_NR_fsync:
11137         return get_errno(fsync(arg1));
11138     case TARGET_NR_clone:
11139         /* Linux manages to have three different orderings for its
11140          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11141          * match the kernel's CONFIG_CLONE_* settings.
11142          * Microblaze is further special in that it uses a sixth
11143          * implicit argument to clone for the TLS pointer.
11144          */
11145 #if defined(TARGET_MICROBLAZE)
11146         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11147 #elif defined(TARGET_CLONE_BACKWARDS)
11148         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11149 #elif defined(TARGET_CLONE_BACKWARDS2)
11150         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11151 #else
11152         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11153 #endif
11154         return ret;
11155 #ifdef __NR_exit_group
11156         /* new thread calls */
11157     case TARGET_NR_exit_group:
11158         preexit_cleanup(cpu_env, arg1);
11159         return get_errno(exit_group(arg1));
11160 #endif
11161     case TARGET_NR_setdomainname:
11162         if (!(p = lock_user_string(arg1)))
11163             return -TARGET_EFAULT;
11164         ret = get_errno(setdomainname(p, arg2));
11165         unlock_user(p, arg1, 0);
11166         return ret;
11167     case TARGET_NR_uname:
11168         /* no need to transcode because we use the linux syscall */
11169         {
11170             struct new_utsname * buf;
11171 
11172             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11173                 return -TARGET_EFAULT;
11174             ret = get_errno(sys_uname(buf));
11175             if (!is_error(ret)) {
11176                 /* Overwrite the native machine name with whatever is being
11177                    emulated. */
11178                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11179                           sizeof(buf->machine));
11180                 /* Allow the user to override the reported release.  */
11181                 if (qemu_uname_release && *qemu_uname_release) {
11182                     g_strlcpy(buf->release, qemu_uname_release,
11183                               sizeof(buf->release));
11184                 }
11185             }
11186             unlock_user_struct(buf, arg1, 1);
11187         }
11188         return ret;
11189 #ifdef TARGET_I386
11190     case TARGET_NR_modify_ldt:
11191         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11192 #if !defined(TARGET_X86_64)
11193     case TARGET_NR_vm86:
11194         return do_vm86(cpu_env, arg1, arg2);
11195 #endif
11196 #endif
11197 #if defined(TARGET_NR_adjtimex)
11198     case TARGET_NR_adjtimex:
11199         {
11200             struct timex host_buf;
11201 
11202             if (target_to_host_timex(&host_buf, arg1) != 0) {
11203                 return -TARGET_EFAULT;
11204             }
11205             ret = get_errno(adjtimex(&host_buf));
11206             if (!is_error(ret)) {
11207                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11208                     return -TARGET_EFAULT;
11209                 }
11210             }
11211         }
11212         return ret;
11213 #endif
11214 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11215     case TARGET_NR_clock_adjtime:
11216         {
11217             struct timex htx;
11218 
11219             if (target_to_host_timex(&htx, arg2) != 0) {
11220                 return -TARGET_EFAULT;
11221             }
11222             ret = get_errno(clock_adjtime(arg1, &htx));
11223             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11224                 return -TARGET_EFAULT;
11225             }
11226         }
11227         return ret;
11228 #endif
11229 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11230     case TARGET_NR_clock_adjtime64:
11231         {
11232             struct timex htx;
11233 
11234             if (target_to_host_timex64(&htx, arg2) != 0) {
11235                 return -TARGET_EFAULT;
11236             }
11237             ret = get_errno(clock_adjtime(arg1, &htx));
11238             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11239                     return -TARGET_EFAULT;
11240             }
11241         }
11242         return ret;
11243 #endif
11244     case TARGET_NR_getpgid:
11245         return get_errno(getpgid(arg1));
11246     case TARGET_NR_fchdir:
11247         return get_errno(fchdir(arg1));
11248     case TARGET_NR_personality:
11249         return get_errno(personality(arg1));
11250 #ifdef TARGET_NR__llseek /* Not on alpha */
11251     case TARGET_NR__llseek:
11252         {
11253             int64_t res;
11254 #if !defined(__NR_llseek)
11255             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11256             if (res == -1) {
11257                 ret = get_errno(res);
11258             } else {
11259                 ret = 0;
11260             }
11261 #else
11262             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11263 #endif
11264             if ((ret == 0) && put_user_s64(res, arg4)) {
11265                 return -TARGET_EFAULT;
11266             }
11267         }
11268         return ret;
11269 #endif
11270 #ifdef TARGET_NR_getdents
11271     case TARGET_NR_getdents:
11272         return do_getdents(arg1, arg2, arg3);
11273 #endif /* TARGET_NR_getdents */
11274 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11275     case TARGET_NR_getdents64:
11276         return do_getdents64(arg1, arg2, arg3);
11277 #endif /* TARGET_NR_getdents64 */
11278 #if defined(TARGET_NR__newselect)
11279     case TARGET_NR__newselect:
11280         return do_select(arg1, arg2, arg3, arg4, arg5);
11281 #endif
11282 #ifdef TARGET_NR_poll
11283     case TARGET_NR_poll:
11284         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11285 #endif
11286 #ifdef TARGET_NR_ppoll
11287     case TARGET_NR_ppoll:
11288         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11289 #endif
11290 #ifdef TARGET_NR_ppoll_time64
11291     case TARGET_NR_ppoll_time64:
11292         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11293 #endif
11294     case TARGET_NR_flock:
11295         /* NOTE: the flock constant seems to be the same for every
11296            Linux platform */
11297         return get_errno(safe_flock(arg1, arg2));
11298     case TARGET_NR_readv:
11299         {
11300             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11301             if (vec != NULL) {
11302                 ret = get_errno(safe_readv(arg1, vec, arg3));
11303                 unlock_iovec(vec, arg2, arg3, 1);
11304             } else {
11305                 ret = -host_to_target_errno(errno);
11306             }
11307         }
11308         return ret;
11309     case TARGET_NR_writev:
11310         {
11311             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11312             if (vec != NULL) {
11313                 ret = get_errno(safe_writev(arg1, vec, arg3));
11314                 unlock_iovec(vec, arg2, arg3, 0);
11315             } else {
11316                 ret = -host_to_target_errno(errno);
11317             }
11318         }
11319         return ret;
11320 #if defined(TARGET_NR_preadv)
11321     case TARGET_NR_preadv:
11322         {
11323             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11324             if (vec != NULL) {
11325                 unsigned long low, high;
11326 
11327                 target_to_host_low_high(arg4, arg5, &low, &high);
11328                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11329                 unlock_iovec(vec, arg2, arg3, 1);
11330             } else {
11331                 ret = -host_to_target_errno(errno);
11332            }
11333         }
11334         return ret;
11335 #endif
11336 #if defined(TARGET_NR_pwritev)
11337     case TARGET_NR_pwritev:
11338         {
11339             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11340             if (vec != NULL) {
11341                 unsigned long low, high;
11342 
11343                 target_to_host_low_high(arg4, arg5, &low, &high);
11344                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11345                 unlock_iovec(vec, arg2, arg3, 0);
11346             } else {
11347                 ret = -host_to_target_errno(errno);
11348            }
11349         }
11350         return ret;
11351 #endif
11352     case TARGET_NR_getsid:
11353         return get_errno(getsid(arg1));
11354 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11355     case TARGET_NR_fdatasync:
11356         return get_errno(fdatasync(arg1));
11357 #endif
11358     case TARGET_NR_sched_getaffinity:
11359         {
11360             unsigned int mask_size;
11361             unsigned long *mask;
11362 
11363             /*
11364              * sched_getaffinity needs multiples of ulong, so need to take
11365              * care of mismatches between target ulong and host ulong sizes.
11366              */
11367             if (arg2 & (sizeof(abi_ulong) - 1)) {
11368                 return -TARGET_EINVAL;
11369             }
11370             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11371 
11372             mask = alloca(mask_size);
11373             memset(mask, 0, mask_size);
11374             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11375 
11376             if (!is_error(ret)) {
11377                 if (ret > arg2) {
11378                     /* More data returned than the caller's buffer will fit.
11379                      * This only happens if sizeof(abi_long) < sizeof(long)
11380                      * and the caller passed us a buffer holding an odd number
11381                      * of abi_longs. If the host kernel is actually using the
11382                      * extra 4 bytes then fail EINVAL; otherwise we can just
11383                      * ignore them and only copy the interesting part.
11384                      */
11385                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11386                     if (numcpus > arg2 * 8) {
11387                         return -TARGET_EINVAL;
11388                     }
11389                     ret = arg2;
11390                 }
11391 
11392                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11393                     return -TARGET_EFAULT;
11394                 }
11395             }
11396         }
11397         return ret;
11398     case TARGET_NR_sched_setaffinity:
11399         {
11400             unsigned int mask_size;
11401             unsigned long *mask;
11402 
11403             /*
11404              * sched_setaffinity needs multiples of ulong, so need to take
11405              * care of mismatches between target ulong and host ulong sizes.
11406              */
11407             if (arg2 & (sizeof(abi_ulong) - 1)) {
11408                 return -TARGET_EINVAL;
11409             }
11410             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11411             mask = alloca(mask_size);
11412 
11413             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11414             if (ret) {
11415                 return ret;
11416             }
11417 
11418             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11419         }
11420     case TARGET_NR_getcpu:
11421         {
11422             unsigned cpuid, node;
11423             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11424                                        arg2 ? &node : NULL,
11425                                        NULL));
11426             if (is_error(ret)) {
11427                 return ret;
11428             }
11429             if (arg1 && put_user_u32(cpuid, arg1)) {
11430                 return -TARGET_EFAULT;
11431             }
11432             if (arg2 && put_user_u32(node, arg2)) {
11433                 return -TARGET_EFAULT;
11434             }
11435         }
11436         return ret;
11437     case TARGET_NR_sched_setparam:
11438         {
11439             struct target_sched_param *target_schp;
11440             struct sched_param schp;
11441 
11442             if (arg2 == 0) {
11443                 return -TARGET_EINVAL;
11444             }
11445             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11446                 return -TARGET_EFAULT;
11447             }
11448             schp.sched_priority = tswap32(target_schp->sched_priority);
11449             unlock_user_struct(target_schp, arg2, 0);
11450             return get_errno(sys_sched_setparam(arg1, &schp));
11451         }
11452     case TARGET_NR_sched_getparam:
11453         {
11454             struct target_sched_param *target_schp;
11455             struct sched_param schp;
11456 
11457             if (arg2 == 0) {
11458                 return -TARGET_EINVAL;
11459             }
11460             ret = get_errno(sys_sched_getparam(arg1, &schp));
11461             if (!is_error(ret)) {
11462                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11463                     return -TARGET_EFAULT;
11464                 }
11465                 target_schp->sched_priority = tswap32(schp.sched_priority);
11466                 unlock_user_struct(target_schp, arg2, 1);
11467             }
11468         }
11469         return ret;
11470     case TARGET_NR_sched_setscheduler:
11471         {
11472             struct target_sched_param *target_schp;
11473             struct sched_param schp;
11474             if (arg3 == 0) {
11475                 return -TARGET_EINVAL;
11476             }
11477             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11478                 return -TARGET_EFAULT;
11479             }
11480             schp.sched_priority = tswap32(target_schp->sched_priority);
11481             unlock_user_struct(target_schp, arg3, 0);
11482             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11483         }
11484     case TARGET_NR_sched_getscheduler:
11485         return get_errno(sys_sched_getscheduler(arg1));
11486     case TARGET_NR_sched_getattr:
11487         {
11488             struct target_sched_attr *target_scha;
11489             struct sched_attr scha;
11490             if (arg2 == 0) {
11491                 return -TARGET_EINVAL;
11492             }
11493             if (arg3 > sizeof(scha)) {
11494                 arg3 = sizeof(scha);
11495             }
11496             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11497             if (!is_error(ret)) {
11498                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11499                 if (!target_scha) {
11500                     return -TARGET_EFAULT;
11501                 }
11502                 target_scha->size = tswap32(scha.size);
11503                 target_scha->sched_policy = tswap32(scha.sched_policy);
11504                 target_scha->sched_flags = tswap64(scha.sched_flags);
11505                 target_scha->sched_nice = tswap32(scha.sched_nice);
11506                 target_scha->sched_priority = tswap32(scha.sched_priority);
11507                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11508                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11509                 target_scha->sched_period = tswap64(scha.sched_period);
11510                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11511                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11512                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11513                 }
11514                 unlock_user(target_scha, arg2, arg3);
11515             }
11516             return ret;
11517         }
11518     case TARGET_NR_sched_setattr:
11519         {
11520             struct target_sched_attr *target_scha;
11521             struct sched_attr scha;
11522             uint32_t size;
11523             int zeroed;
11524             if (arg2 == 0) {
11525                 return -TARGET_EINVAL;
11526             }
11527             if (get_user_u32(size, arg2)) {
11528                 return -TARGET_EFAULT;
11529             }
11530             if (!size) {
11531                 size = offsetof(struct target_sched_attr, sched_util_min);
11532             }
11533             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11534                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11535                     return -TARGET_EFAULT;
11536                 }
11537                 return -TARGET_E2BIG;
11538             }
11539 
11540             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11541             if (zeroed < 0) {
11542                 return zeroed;
11543             } else if (zeroed == 0) {
11544                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11545                     return -TARGET_EFAULT;
11546                 }
11547                 return -TARGET_E2BIG;
11548             }
11549             if (size > sizeof(struct target_sched_attr)) {
11550                 size = sizeof(struct target_sched_attr);
11551             }
11552 
11553             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11554             if (!target_scha) {
11555                 return -TARGET_EFAULT;
11556             }
11557             scha.size = size;
11558             scha.sched_policy = tswap32(target_scha->sched_policy);
11559             scha.sched_flags = tswap64(target_scha->sched_flags);
11560             scha.sched_nice = tswap32(target_scha->sched_nice);
11561             scha.sched_priority = tswap32(target_scha->sched_priority);
11562             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11563             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11564             scha.sched_period = tswap64(target_scha->sched_period);
11565             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11566                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11567                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11568             }
11569             unlock_user(target_scha, arg2, 0);
11570             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11571         }
11572     case TARGET_NR_sched_yield:
11573         return get_errno(sched_yield());
11574     case TARGET_NR_sched_get_priority_max:
11575         return get_errno(sched_get_priority_max(arg1));
11576     case TARGET_NR_sched_get_priority_min:
11577         return get_errno(sched_get_priority_min(arg1));
11578 #ifdef TARGET_NR_sched_rr_get_interval
11579     case TARGET_NR_sched_rr_get_interval:
11580         {
11581             struct timespec ts;
11582             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11583             if (!is_error(ret)) {
11584                 ret = host_to_target_timespec(arg2, &ts);
11585             }
11586         }
11587         return ret;
11588 #endif
11589 #ifdef TARGET_NR_sched_rr_get_interval_time64
11590     case TARGET_NR_sched_rr_get_interval_time64:
11591         {
11592             struct timespec ts;
11593             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11594             if (!is_error(ret)) {
11595                 ret = host_to_target_timespec64(arg2, &ts);
11596             }
11597         }
11598         return ret;
11599 #endif
11600 #if defined(TARGET_NR_nanosleep)
11601     case TARGET_NR_nanosleep:
11602         {
11603             struct timespec req, rem;
11604             target_to_host_timespec(&req, arg1);
11605             ret = get_errno(safe_nanosleep(&req, &rem));
11606             if (is_error(ret) && arg2) {
11607                 host_to_target_timespec(arg2, &rem);
11608             }
11609         }
11610         return ret;
11611 #endif
11612     case TARGET_NR_prctl:
11613         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11614         break;
11615 #ifdef TARGET_NR_arch_prctl
11616     case TARGET_NR_arch_prctl:
11617         return do_arch_prctl(cpu_env, arg1, arg2);
11618 #endif
11619 #ifdef TARGET_NR_pread64
11620     case TARGET_NR_pread64:
11621         if (regpairs_aligned(cpu_env, num)) {
11622             arg4 = arg5;
11623             arg5 = arg6;
11624         }
11625         if (arg2 == 0 && arg3 == 0) {
11626             /* Special-case NULL buffer and zero length, which should succeed */
11627             p = 0;
11628         } else {
11629             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11630             if (!p) {
11631                 return -TARGET_EFAULT;
11632             }
11633         }
11634         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11635         unlock_user(p, arg2, ret);
11636         return ret;
11637     case TARGET_NR_pwrite64:
11638         if (regpairs_aligned(cpu_env, num)) {
11639             arg4 = arg5;
11640             arg5 = arg6;
11641         }
11642         if (arg2 == 0 && arg3 == 0) {
11643             /* Special-case NULL buffer and zero length, which should succeed */
11644             p = 0;
11645         } else {
11646             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11647             if (!p) {
11648                 return -TARGET_EFAULT;
11649             }
11650         }
11651         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11652         unlock_user(p, arg2, 0);
11653         return ret;
11654 #endif
11655     case TARGET_NR_getcwd:
11656         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11657             return -TARGET_EFAULT;
11658         ret = get_errno(sys_getcwd1(p, arg2));
11659         unlock_user(p, arg1, ret);
11660         return ret;
11661     case TARGET_NR_capget:
11662     case TARGET_NR_capset:
11663     {
11664         struct target_user_cap_header *target_header;
11665         struct target_user_cap_data *target_data = NULL;
11666         struct __user_cap_header_struct header;
11667         struct __user_cap_data_struct data[2];
11668         struct __user_cap_data_struct *dataptr = NULL;
11669         int i, target_datalen;
11670         int data_items = 1;
11671 
11672         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11673             return -TARGET_EFAULT;
11674         }
11675         header.version = tswap32(target_header->version);
11676         header.pid = tswap32(target_header->pid);
11677 
11678         if (header.version != _LINUX_CAPABILITY_VERSION) {
11679             /* Version 2 and up takes pointer to two user_data structs */
11680             data_items = 2;
11681         }
11682 
11683         target_datalen = sizeof(*target_data) * data_items;
11684 
11685         if (arg2) {
11686             if (num == TARGET_NR_capget) {
11687                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11688             } else {
11689                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11690             }
11691             if (!target_data) {
11692                 unlock_user_struct(target_header, arg1, 0);
11693                 return -TARGET_EFAULT;
11694             }
11695 
11696             if (num == TARGET_NR_capset) {
11697                 for (i = 0; i < data_items; i++) {
11698                     data[i].effective = tswap32(target_data[i].effective);
11699                     data[i].permitted = tswap32(target_data[i].permitted);
11700                     data[i].inheritable = tswap32(target_data[i].inheritable);
11701                 }
11702             }
11703 
11704             dataptr = data;
11705         }
11706 
11707         if (num == TARGET_NR_capget) {
11708             ret = get_errno(capget(&header, dataptr));
11709         } else {
11710             ret = get_errno(capset(&header, dataptr));
11711         }
11712 
11713         /* The kernel always updates version for both capget and capset */
11714         target_header->version = tswap32(header.version);
11715         unlock_user_struct(target_header, arg1, 1);
11716 
11717         if (arg2) {
11718             if (num == TARGET_NR_capget) {
11719                 for (i = 0; i < data_items; i++) {
11720                     target_data[i].effective = tswap32(data[i].effective);
11721                     target_data[i].permitted = tswap32(data[i].permitted);
11722                     target_data[i].inheritable = tswap32(data[i].inheritable);
11723                 }
11724                 unlock_user(target_data, arg2, target_datalen);
11725             } else {
11726                 unlock_user(target_data, arg2, 0);
11727             }
11728         }
11729         return ret;
11730     }
11731     case TARGET_NR_sigaltstack:
11732         return do_sigaltstack(arg1, arg2, cpu_env);
11733 
11734 #ifdef CONFIG_SENDFILE
11735 #ifdef TARGET_NR_sendfile
11736     case TARGET_NR_sendfile:
11737     {
11738         off_t *offp = NULL;
11739         off_t off;
11740         if (arg3) {
11741             ret = get_user_sal(off, arg3);
11742             if (is_error(ret)) {
11743                 return ret;
11744             }
11745             offp = &off;
11746         }
11747         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11748         if (!is_error(ret) && arg3) {
11749             abi_long ret2 = put_user_sal(off, arg3);
11750             if (is_error(ret2)) {
11751                 ret = ret2;
11752             }
11753         }
11754         return ret;
11755     }
11756 #endif
11757 #ifdef TARGET_NR_sendfile64
11758     case TARGET_NR_sendfile64:
11759     {
11760         off_t *offp = NULL;
11761         off_t off;
11762         if (arg3) {
11763             ret = get_user_s64(off, arg3);
11764             if (is_error(ret)) {
11765                 return ret;
11766             }
11767             offp = &off;
11768         }
11769         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11770         if (!is_error(ret) && arg3) {
11771             abi_long ret2 = put_user_s64(off, arg3);
11772             if (is_error(ret2)) {
11773                 ret = ret2;
11774             }
11775         }
11776         return ret;
11777     }
11778 #endif
11779 #endif
11780 #ifdef TARGET_NR_vfork
11781     case TARGET_NR_vfork:
11782         return get_errno(do_fork(cpu_env,
11783                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11784                          0, 0, 0, 0));
11785 #endif
11786 #ifdef TARGET_NR_ugetrlimit
11787     case TARGET_NR_ugetrlimit:
11788     {
11789 	struct rlimit rlim;
11790 	int resource = target_to_host_resource(arg1);
11791 	ret = get_errno(getrlimit(resource, &rlim));
11792 	if (!is_error(ret)) {
11793 	    struct target_rlimit *target_rlim;
11794             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11795                 return -TARGET_EFAULT;
11796 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11797 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11798             unlock_user_struct(target_rlim, arg2, 1);
11799 	}
11800         return ret;
11801     }
11802 #endif
11803 #ifdef TARGET_NR_truncate64
11804     case TARGET_NR_truncate64:
11805         if (!(p = lock_user_string(arg1)))
11806             return -TARGET_EFAULT;
11807 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11808         unlock_user(p, arg1, 0);
11809         return ret;
11810 #endif
11811 #ifdef TARGET_NR_ftruncate64
11812     case TARGET_NR_ftruncate64:
11813         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11814 #endif
11815 #ifdef TARGET_NR_stat64
11816     case TARGET_NR_stat64:
11817         if (!(p = lock_user_string(arg1))) {
11818             return -TARGET_EFAULT;
11819         }
11820         ret = get_errno(stat(path(p), &st));
11821         unlock_user(p, arg1, 0);
11822         if (!is_error(ret))
11823             ret = host_to_target_stat64(cpu_env, arg2, &st);
11824         return ret;
11825 #endif
11826 #ifdef TARGET_NR_lstat64
11827     case TARGET_NR_lstat64:
11828         if (!(p = lock_user_string(arg1))) {
11829             return -TARGET_EFAULT;
11830         }
11831         ret = get_errno(lstat(path(p), &st));
11832         unlock_user(p, arg1, 0);
11833         if (!is_error(ret))
11834             ret = host_to_target_stat64(cpu_env, arg2, &st);
11835         return ret;
11836 #endif
11837 #ifdef TARGET_NR_fstat64
11838     case TARGET_NR_fstat64:
11839         ret = get_errno(fstat(arg1, &st));
11840         if (!is_error(ret))
11841             ret = host_to_target_stat64(cpu_env, arg2, &st);
11842         return ret;
11843 #endif
11844 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11845 #ifdef TARGET_NR_fstatat64
11846     case TARGET_NR_fstatat64:
11847 #endif
11848 #ifdef TARGET_NR_newfstatat
11849     case TARGET_NR_newfstatat:
11850 #endif
11851         if (!(p = lock_user_string(arg2))) {
11852             return -TARGET_EFAULT;
11853         }
11854         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11855         unlock_user(p, arg2, 0);
11856         if (!is_error(ret))
11857             ret = host_to_target_stat64(cpu_env, arg3, &st);
11858         return ret;
11859 #endif
11860 #if defined(TARGET_NR_statx)
11861     case TARGET_NR_statx:
11862         {
11863             struct target_statx *target_stx;
11864             int dirfd = arg1;
11865             int flags = arg3;
11866 
11867             p = lock_user_string(arg2);
11868             if (p == NULL) {
11869                 return -TARGET_EFAULT;
11870             }
11871 #if defined(__NR_statx)
11872             {
11873                 /*
11874                  * It is assumed that struct statx is architecture independent.
11875                  */
11876                 struct target_statx host_stx;
11877                 int mask = arg4;
11878 
11879                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11880                 if (!is_error(ret)) {
11881                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11882                         unlock_user(p, arg2, 0);
11883                         return -TARGET_EFAULT;
11884                     }
11885                 }
11886 
11887                 if (ret != -TARGET_ENOSYS) {
11888                     unlock_user(p, arg2, 0);
11889                     return ret;
11890                 }
11891             }
11892 #endif
11893             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11894             unlock_user(p, arg2, 0);
11895 
11896             if (!is_error(ret)) {
11897                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11898                     return -TARGET_EFAULT;
11899                 }
11900                 memset(target_stx, 0, sizeof(*target_stx));
11901                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11902                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11903                 __put_user(st.st_ino, &target_stx->stx_ino);
11904                 __put_user(st.st_mode, &target_stx->stx_mode);
11905                 __put_user(st.st_uid, &target_stx->stx_uid);
11906                 __put_user(st.st_gid, &target_stx->stx_gid);
11907                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11908                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11909                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11910                 __put_user(st.st_size, &target_stx->stx_size);
11911                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11912                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11913                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11914                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11915                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11916                 unlock_user_struct(target_stx, arg5, 1);
11917             }
11918         }
11919         return ret;
11920 #endif
11921 #ifdef TARGET_NR_lchown
11922     case TARGET_NR_lchown:
11923         if (!(p = lock_user_string(arg1)))
11924             return -TARGET_EFAULT;
11925         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11926         unlock_user(p, arg1, 0);
11927         return ret;
11928 #endif
11929 #ifdef TARGET_NR_getuid
11930     case TARGET_NR_getuid:
11931         return get_errno(high2lowuid(getuid()));
11932 #endif
11933 #ifdef TARGET_NR_getgid
11934     case TARGET_NR_getgid:
11935         return get_errno(high2lowgid(getgid()));
11936 #endif
11937 #ifdef TARGET_NR_geteuid
11938     case TARGET_NR_geteuid:
11939         return get_errno(high2lowuid(geteuid()));
11940 #endif
11941 #ifdef TARGET_NR_getegid
11942     case TARGET_NR_getegid:
11943         return get_errno(high2lowgid(getegid()));
11944 #endif
11945     case TARGET_NR_setreuid:
11946         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
11947     case TARGET_NR_setregid:
11948         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
11949     case TARGET_NR_getgroups:
11950         { /* the same code as for TARGET_NR_getgroups32 */
11951             int gidsetsize = arg1;
11952             target_id *target_grouplist;
11953             g_autofree gid_t *grouplist = NULL;
11954             int i;
11955 
11956             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11957                 return -TARGET_EINVAL;
11958             }
11959             if (gidsetsize > 0) {
11960                 grouplist = g_try_new(gid_t, gidsetsize);
11961                 if (!grouplist) {
11962                     return -TARGET_ENOMEM;
11963                 }
11964             }
11965             ret = get_errno(getgroups(gidsetsize, grouplist));
11966             if (!is_error(ret) && gidsetsize > 0) {
11967                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11968                                              gidsetsize * sizeof(target_id), 0);
11969                 if (!target_grouplist) {
11970                     return -TARGET_EFAULT;
11971                 }
11972                 for (i = 0; i < ret; i++) {
11973                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11974                 }
11975                 unlock_user(target_grouplist, arg2,
11976                             gidsetsize * sizeof(target_id));
11977             }
11978             return ret;
11979         }
11980     case TARGET_NR_setgroups:
11981         { /* the same code as for TARGET_NR_setgroups32 */
11982             int gidsetsize = arg1;
11983             target_id *target_grouplist;
11984             g_autofree gid_t *grouplist = NULL;
11985             int i;
11986 
11987             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11988                 return -TARGET_EINVAL;
11989             }
11990             if (gidsetsize > 0) {
11991                 grouplist = g_try_new(gid_t, gidsetsize);
11992                 if (!grouplist) {
11993                     return -TARGET_ENOMEM;
11994                 }
11995                 target_grouplist = lock_user(VERIFY_READ, arg2,
11996                                              gidsetsize * sizeof(target_id), 1);
11997                 if (!target_grouplist) {
11998                     return -TARGET_EFAULT;
11999                 }
12000                 for (i = 0; i < gidsetsize; i++) {
12001                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12002                 }
12003                 unlock_user(target_grouplist, arg2,
12004                             gidsetsize * sizeof(target_id));
12005             }
12006             return get_errno(sys_setgroups(gidsetsize, grouplist));
12007         }
12008     case TARGET_NR_fchown:
12009         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12010 #if defined(TARGET_NR_fchownat)
12011     case TARGET_NR_fchownat:
12012         if (!(p = lock_user_string(arg2)))
12013             return -TARGET_EFAULT;
12014         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12015                                  low2highgid(arg4), arg5));
12016         unlock_user(p, arg2, 0);
12017         return ret;
12018 #endif
12019 #ifdef TARGET_NR_setresuid
12020     case TARGET_NR_setresuid:
12021         return get_errno(sys_setresuid(low2highuid(arg1),
12022                                        low2highuid(arg2),
12023                                        low2highuid(arg3)));
12024 #endif
12025 #ifdef TARGET_NR_getresuid
12026     case TARGET_NR_getresuid:
12027         {
12028             uid_t ruid, euid, suid;
12029             ret = get_errno(getresuid(&ruid, &euid, &suid));
12030             if (!is_error(ret)) {
12031                 if (put_user_id(high2lowuid(ruid), arg1)
12032                     || put_user_id(high2lowuid(euid), arg2)
12033                     || put_user_id(high2lowuid(suid), arg3))
12034                     return -TARGET_EFAULT;
12035             }
12036         }
12037         return ret;
12038 #endif
12039 #ifdef TARGET_NR_getresgid
12040     case TARGET_NR_setresgid:
12041         return get_errno(sys_setresgid(low2highgid(arg1),
12042                                        low2highgid(arg2),
12043                                        low2highgid(arg3)));
12044 #endif
12045 #ifdef TARGET_NR_getresgid
12046     case TARGET_NR_getresgid:
12047         {
12048             gid_t rgid, egid, sgid;
12049             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12050             if (!is_error(ret)) {
12051                 if (put_user_id(high2lowgid(rgid), arg1)
12052                     || put_user_id(high2lowgid(egid), arg2)
12053                     || put_user_id(high2lowgid(sgid), arg3))
12054                     return -TARGET_EFAULT;
12055             }
12056         }
12057         return ret;
12058 #endif
12059 #ifdef TARGET_NR_chown
12060     case TARGET_NR_chown:
12061         if (!(p = lock_user_string(arg1)))
12062             return -TARGET_EFAULT;
12063         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12064         unlock_user(p, arg1, 0);
12065         return ret;
12066 #endif
12067     case TARGET_NR_setuid:
12068         return get_errno(sys_setuid(low2highuid(arg1)));
12069     case TARGET_NR_setgid:
12070         return get_errno(sys_setgid(low2highgid(arg1)));
12071     case TARGET_NR_setfsuid:
12072         return get_errno(setfsuid(arg1));
12073     case TARGET_NR_setfsgid:
12074         return get_errno(setfsgid(arg1));
12075 
12076 #ifdef TARGET_NR_lchown32
12077     case TARGET_NR_lchown32:
12078         if (!(p = lock_user_string(arg1)))
12079             return -TARGET_EFAULT;
12080         ret = get_errno(lchown(p, arg2, arg3));
12081         unlock_user(p, arg1, 0);
12082         return ret;
12083 #endif
12084 #ifdef TARGET_NR_getuid32
12085     case TARGET_NR_getuid32:
12086         return get_errno(getuid());
12087 #endif
12088 
12089 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12090    /* Alpha specific */
12091     case TARGET_NR_getxuid:
12092          {
12093             uid_t euid;
12094             euid=geteuid();
12095             cpu_env->ir[IR_A4]=euid;
12096          }
12097         return get_errno(getuid());
12098 #endif
12099 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12100    /* Alpha specific */
12101     case TARGET_NR_getxgid:
12102          {
12103             uid_t egid;
12104             egid=getegid();
12105             cpu_env->ir[IR_A4]=egid;
12106          }
12107         return get_errno(getgid());
12108 #endif
12109 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12110     /* Alpha specific */
12111     case TARGET_NR_osf_getsysinfo:
12112         ret = -TARGET_EOPNOTSUPP;
12113         switch (arg1) {
12114           case TARGET_GSI_IEEE_FP_CONTROL:
12115             {
12116                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12117                 uint64_t swcr = cpu_env->swcr;
12118 
12119                 swcr &= ~SWCR_STATUS_MASK;
12120                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12121 
12122                 if (put_user_u64 (swcr, arg2))
12123                         return -TARGET_EFAULT;
12124                 ret = 0;
12125             }
12126             break;
12127 
12128           /* case GSI_IEEE_STATE_AT_SIGNAL:
12129              -- Not implemented in linux kernel.
12130              case GSI_UACPROC:
12131              -- Retrieves current unaligned access state; not much used.
12132              case GSI_PROC_TYPE:
12133              -- Retrieves implver information; surely not used.
12134              case GSI_GET_HWRPB:
12135              -- Grabs a copy of the HWRPB; surely not used.
12136           */
12137         }
12138         return ret;
12139 #endif
12140 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12141     /* Alpha specific */
12142     case TARGET_NR_osf_setsysinfo:
12143         ret = -TARGET_EOPNOTSUPP;
12144         switch (arg1) {
12145           case TARGET_SSI_IEEE_FP_CONTROL:
12146             {
12147                 uint64_t swcr, fpcr;
12148 
12149                 if (get_user_u64 (swcr, arg2)) {
12150                     return -TARGET_EFAULT;
12151                 }
12152 
12153                 /*
12154                  * The kernel calls swcr_update_status to update the
12155                  * status bits from the fpcr at every point that it
12156                  * could be queried.  Therefore, we store the status
12157                  * bits only in FPCR.
12158                  */
12159                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12160 
12161                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12162                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12163                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12164                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12165                 ret = 0;
12166             }
12167             break;
12168 
12169           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12170             {
12171                 uint64_t exc, fpcr, fex;
12172 
12173                 if (get_user_u64(exc, arg2)) {
12174                     return -TARGET_EFAULT;
12175                 }
12176                 exc &= SWCR_STATUS_MASK;
12177                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12178 
12179                 /* Old exceptions are not signaled.  */
12180                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12181                 fex = exc & ~fex;
12182                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12183                 fex &= (cpu_env)->swcr;
12184 
12185                 /* Update the hardware fpcr.  */
12186                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12187                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12188 
12189                 if (fex) {
12190                     int si_code = TARGET_FPE_FLTUNK;
12191                     target_siginfo_t info;
12192 
12193                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12194                         si_code = TARGET_FPE_FLTUND;
12195                     }
12196                     if (fex & SWCR_TRAP_ENABLE_INE) {
12197                         si_code = TARGET_FPE_FLTRES;
12198                     }
12199                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12200                         si_code = TARGET_FPE_FLTUND;
12201                     }
12202                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12203                         si_code = TARGET_FPE_FLTOVF;
12204                     }
12205                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12206                         si_code = TARGET_FPE_FLTDIV;
12207                     }
12208                     if (fex & SWCR_TRAP_ENABLE_INV) {
12209                         si_code = TARGET_FPE_FLTINV;
12210                     }
12211 
12212                     info.si_signo = SIGFPE;
12213                     info.si_errno = 0;
12214                     info.si_code = si_code;
12215                     info._sifields._sigfault._addr = (cpu_env)->pc;
12216                     queue_signal(cpu_env, info.si_signo,
12217                                  QEMU_SI_FAULT, &info);
12218                 }
12219                 ret = 0;
12220             }
12221             break;
12222 
12223           /* case SSI_NVPAIRS:
12224              -- Used with SSIN_UACPROC to enable unaligned accesses.
12225              case SSI_IEEE_STATE_AT_SIGNAL:
12226              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12227              -- Not implemented in linux kernel
12228           */
12229         }
12230         return ret;
12231 #endif
12232 #ifdef TARGET_NR_osf_sigprocmask
12233     /* Alpha specific.  */
12234     case TARGET_NR_osf_sigprocmask:
12235         {
12236             abi_ulong mask;
12237             int how;
12238             sigset_t set, oldset;
12239 
12240             switch(arg1) {
12241             case TARGET_SIG_BLOCK:
12242                 how = SIG_BLOCK;
12243                 break;
12244             case TARGET_SIG_UNBLOCK:
12245                 how = SIG_UNBLOCK;
12246                 break;
12247             case TARGET_SIG_SETMASK:
12248                 how = SIG_SETMASK;
12249                 break;
12250             default:
12251                 return -TARGET_EINVAL;
12252             }
12253             mask = arg2;
12254             target_to_host_old_sigset(&set, &mask);
12255             ret = do_sigprocmask(how, &set, &oldset);
12256             if (!ret) {
12257                 host_to_target_old_sigset(&mask, &oldset);
12258                 ret = mask;
12259             }
12260         }
12261         return ret;
12262 #endif
12263 
12264 #ifdef TARGET_NR_getgid32
12265     case TARGET_NR_getgid32:
12266         return get_errno(getgid());
12267 #endif
12268 #ifdef TARGET_NR_geteuid32
12269     case TARGET_NR_geteuid32:
12270         return get_errno(geteuid());
12271 #endif
12272 #ifdef TARGET_NR_getegid32
12273     case TARGET_NR_getegid32:
12274         return get_errno(getegid());
12275 #endif
12276 #ifdef TARGET_NR_setreuid32
12277     case TARGET_NR_setreuid32:
12278         return get_errno(sys_setreuid(arg1, arg2));
12279 #endif
12280 #ifdef TARGET_NR_setregid32
12281     case TARGET_NR_setregid32:
12282         return get_errno(sys_setregid(arg1, arg2));
12283 #endif
12284 #ifdef TARGET_NR_getgroups32
12285     case TARGET_NR_getgroups32:
12286         { /* the same code as for TARGET_NR_getgroups */
12287             int gidsetsize = arg1;
12288             uint32_t *target_grouplist;
12289             g_autofree gid_t *grouplist = NULL;
12290             int i;
12291 
12292             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12293                 return -TARGET_EINVAL;
12294             }
12295             if (gidsetsize > 0) {
12296                 grouplist = g_try_new(gid_t, gidsetsize);
12297                 if (!grouplist) {
12298                     return -TARGET_ENOMEM;
12299                 }
12300             }
12301             ret = get_errno(getgroups(gidsetsize, grouplist));
12302             if (!is_error(ret) && gidsetsize > 0) {
12303                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12304                                              gidsetsize * 4, 0);
12305                 if (!target_grouplist) {
12306                     return -TARGET_EFAULT;
12307                 }
12308                 for (i = 0; i < ret; i++) {
12309                     target_grouplist[i] = tswap32(grouplist[i]);
12310                 }
12311                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12312             }
12313             return ret;
12314         }
12315 #endif
12316 #ifdef TARGET_NR_setgroups32
12317     case TARGET_NR_setgroups32:
12318         { /* the same code as for TARGET_NR_setgroups */
12319             int gidsetsize = arg1;
12320             uint32_t *target_grouplist;
12321             g_autofree gid_t *grouplist = NULL;
12322             int i;
12323 
12324             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12325                 return -TARGET_EINVAL;
12326             }
12327             if (gidsetsize > 0) {
12328                 grouplist = g_try_new(gid_t, gidsetsize);
12329                 if (!grouplist) {
12330                     return -TARGET_ENOMEM;
12331                 }
12332                 target_grouplist = lock_user(VERIFY_READ, arg2,
12333                                              gidsetsize * 4, 1);
12334                 if (!target_grouplist) {
12335                     return -TARGET_EFAULT;
12336                 }
12337                 for (i = 0; i < gidsetsize; i++) {
12338                     grouplist[i] = tswap32(target_grouplist[i]);
12339                 }
12340                 unlock_user(target_grouplist, arg2, 0);
12341             }
12342             return get_errno(sys_setgroups(gidsetsize, grouplist));
12343         }
12344 #endif
12345 #ifdef TARGET_NR_fchown32
12346     case TARGET_NR_fchown32:
12347         return get_errno(fchown(arg1, arg2, arg3));
12348 #endif
12349 #ifdef TARGET_NR_setresuid32
12350     case TARGET_NR_setresuid32:
12351         return get_errno(sys_setresuid(arg1, arg2, arg3));
12352 #endif
12353 #ifdef TARGET_NR_getresuid32
12354     case TARGET_NR_getresuid32:
12355         {
12356             uid_t ruid, euid, suid;
12357             ret = get_errno(getresuid(&ruid, &euid, &suid));
12358             if (!is_error(ret)) {
12359                 if (put_user_u32(ruid, arg1)
12360                     || put_user_u32(euid, arg2)
12361                     || put_user_u32(suid, arg3))
12362                     return -TARGET_EFAULT;
12363             }
12364         }
12365         return ret;
12366 #endif
12367 #ifdef TARGET_NR_setresgid32
12368     case TARGET_NR_setresgid32:
12369         return get_errno(sys_setresgid(arg1, arg2, arg3));
12370 #endif
12371 #ifdef TARGET_NR_getresgid32
12372     case TARGET_NR_getresgid32:
12373         {
12374             gid_t rgid, egid, sgid;
12375             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12376             if (!is_error(ret)) {
12377                 if (put_user_u32(rgid, arg1)
12378                     || put_user_u32(egid, arg2)
12379                     || put_user_u32(sgid, arg3))
12380                     return -TARGET_EFAULT;
12381             }
12382         }
12383         return ret;
12384 #endif
12385 #ifdef TARGET_NR_chown32
12386     case TARGET_NR_chown32:
12387         if (!(p = lock_user_string(arg1)))
12388             return -TARGET_EFAULT;
12389         ret = get_errno(chown(p, arg2, arg3));
12390         unlock_user(p, arg1, 0);
12391         return ret;
12392 #endif
12393 #ifdef TARGET_NR_setuid32
12394     case TARGET_NR_setuid32:
12395         return get_errno(sys_setuid(arg1));
12396 #endif
12397 #ifdef TARGET_NR_setgid32
12398     case TARGET_NR_setgid32:
12399         return get_errno(sys_setgid(arg1));
12400 #endif
12401 #ifdef TARGET_NR_setfsuid32
12402     case TARGET_NR_setfsuid32:
12403         return get_errno(setfsuid(arg1));
12404 #endif
12405 #ifdef TARGET_NR_setfsgid32
12406     case TARGET_NR_setfsgid32:
12407         return get_errno(setfsgid(arg1));
12408 #endif
12409 #ifdef TARGET_NR_mincore
12410     case TARGET_NR_mincore:
12411         {
12412             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12413             if (!a) {
12414                 return -TARGET_ENOMEM;
12415             }
12416             p = lock_user_string(arg3);
12417             if (!p) {
12418                 ret = -TARGET_EFAULT;
12419             } else {
12420                 ret = get_errno(mincore(a, arg2, p));
12421                 unlock_user(p, arg3, ret);
12422             }
12423             unlock_user(a, arg1, 0);
12424         }
12425         return ret;
12426 #endif
12427 #ifdef TARGET_NR_arm_fadvise64_64
12428     case TARGET_NR_arm_fadvise64_64:
12429         /* arm_fadvise64_64 looks like fadvise64_64 but
12430          * with different argument order: fd, advice, offset, len
12431          * rather than the usual fd, offset, len, advice.
12432          * Note that offset and len are both 64-bit so appear as
12433          * pairs of 32-bit registers.
12434          */
12435         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12436                             target_offset64(arg5, arg6), arg2);
12437         return -host_to_target_errno(ret);
12438 #endif
12439 
12440 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12441 
12442 #ifdef TARGET_NR_fadvise64_64
12443     case TARGET_NR_fadvise64_64:
12444 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12445         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12446         ret = arg2;
12447         arg2 = arg3;
12448         arg3 = arg4;
12449         arg4 = arg5;
12450         arg5 = arg6;
12451         arg6 = ret;
12452 #else
12453         /* 6 args: fd, offset (high, low), len (high, low), advice */
12454         if (regpairs_aligned(cpu_env, num)) {
12455             /* offset is in (3,4), len in (5,6) and advice in 7 */
12456             arg2 = arg3;
12457             arg3 = arg4;
12458             arg4 = arg5;
12459             arg5 = arg6;
12460             arg6 = arg7;
12461         }
12462 #endif
12463         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12464                             target_offset64(arg4, arg5), arg6);
12465         return -host_to_target_errno(ret);
12466 #endif
12467 
12468 #ifdef TARGET_NR_fadvise64
12469     case TARGET_NR_fadvise64:
12470         /* 5 args: fd, offset (high, low), len, advice */
12471         if (regpairs_aligned(cpu_env, num)) {
12472             /* offset is in (3,4), len in 5 and advice in 6 */
12473             arg2 = arg3;
12474             arg3 = arg4;
12475             arg4 = arg5;
12476             arg5 = arg6;
12477         }
12478         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12479         return -host_to_target_errno(ret);
12480 #endif
12481 
12482 #else /* not a 32-bit ABI */
12483 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12484 #ifdef TARGET_NR_fadvise64_64
12485     case TARGET_NR_fadvise64_64:
12486 #endif
12487 #ifdef TARGET_NR_fadvise64
12488     case TARGET_NR_fadvise64:
12489 #endif
12490 #ifdef TARGET_S390X
12491         switch (arg4) {
12492         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12493         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12494         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12495         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12496         default: break;
12497         }
12498 #endif
12499         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12500 #endif
12501 #endif /* end of 64-bit ABI fadvise handling */
12502 
12503 #ifdef TARGET_NR_madvise
12504     case TARGET_NR_madvise:
12505         return target_madvise(arg1, arg2, arg3);
12506 #endif
12507 #ifdef TARGET_NR_fcntl64
12508     case TARGET_NR_fcntl64:
12509     {
12510         int cmd;
12511         struct flock fl;
12512         from_flock64_fn *copyfrom = copy_from_user_flock64;
12513         to_flock64_fn *copyto = copy_to_user_flock64;
12514 
12515 #ifdef TARGET_ARM
12516         if (!cpu_env->eabi) {
12517             copyfrom = copy_from_user_oabi_flock64;
12518             copyto = copy_to_user_oabi_flock64;
12519         }
12520 #endif
12521 
12522         cmd = target_to_host_fcntl_cmd(arg2);
12523         if (cmd == -TARGET_EINVAL) {
12524             return cmd;
12525         }
12526 
12527         switch(arg2) {
12528         case TARGET_F_GETLK64:
12529             ret = copyfrom(&fl, arg3);
12530             if (ret) {
12531                 break;
12532             }
12533             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12534             if (ret == 0) {
12535                 ret = copyto(arg3, &fl);
12536             }
12537 	    break;
12538 
12539         case TARGET_F_SETLK64:
12540         case TARGET_F_SETLKW64:
12541             ret = copyfrom(&fl, arg3);
12542             if (ret) {
12543                 break;
12544             }
12545             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12546 	    break;
12547         default:
12548             ret = do_fcntl(arg1, arg2, arg3);
12549             break;
12550         }
12551         return ret;
12552     }
12553 #endif
12554 #ifdef TARGET_NR_cacheflush
12555     case TARGET_NR_cacheflush:
12556         /* self-modifying code is handled automatically, so nothing needed */
12557         return 0;
12558 #endif
12559 #ifdef TARGET_NR_getpagesize
12560     case TARGET_NR_getpagesize:
12561         return TARGET_PAGE_SIZE;
12562 #endif
12563     case TARGET_NR_gettid:
12564         return get_errno(sys_gettid());
12565 #ifdef TARGET_NR_readahead
12566     case TARGET_NR_readahead:
12567 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12568         if (regpairs_aligned(cpu_env, num)) {
12569             arg2 = arg3;
12570             arg3 = arg4;
12571             arg4 = arg5;
12572         }
12573         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12574 #else
12575         ret = get_errno(readahead(arg1, arg2, arg3));
12576 #endif
12577         return ret;
12578 #endif
12579 #ifdef CONFIG_ATTR
12580 #ifdef TARGET_NR_setxattr
12581     case TARGET_NR_listxattr:
12582     case TARGET_NR_llistxattr:
12583     {
12584         void *b = 0;
12585         if (arg2) {
12586             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12587             if (!b) {
12588                 return -TARGET_EFAULT;
12589             }
12590         }
12591         p = lock_user_string(arg1);
12592         if (p) {
12593             if (num == TARGET_NR_listxattr) {
12594                 ret = get_errno(listxattr(p, b, arg3));
12595             } else {
12596                 ret = get_errno(llistxattr(p, b, arg3));
12597             }
12598         } else {
12599             ret = -TARGET_EFAULT;
12600         }
12601         unlock_user(p, arg1, 0);
12602         unlock_user(b, arg2, arg3);
12603         return ret;
12604     }
12605     case TARGET_NR_flistxattr:
12606     {
12607         void *b = 0;
12608         if (arg2) {
12609             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12610             if (!b) {
12611                 return -TARGET_EFAULT;
12612             }
12613         }
12614         ret = get_errno(flistxattr(arg1, b, arg3));
12615         unlock_user(b, arg2, arg3);
12616         return ret;
12617     }
12618     case TARGET_NR_setxattr:
12619     case TARGET_NR_lsetxattr:
12620         {
12621             void *n, *v = 0;
12622             if (arg3) {
12623                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12624                 if (!v) {
12625                     return -TARGET_EFAULT;
12626                 }
12627             }
12628             p = lock_user_string(arg1);
12629             n = lock_user_string(arg2);
12630             if (p && n) {
12631                 if (num == TARGET_NR_setxattr) {
12632                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12633                 } else {
12634                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12635                 }
12636             } else {
12637                 ret = -TARGET_EFAULT;
12638             }
12639             unlock_user(p, arg1, 0);
12640             unlock_user(n, arg2, 0);
12641             unlock_user(v, arg3, 0);
12642         }
12643         return ret;
12644     case TARGET_NR_fsetxattr:
12645         {
12646             void *n, *v = 0;
12647             if (arg3) {
12648                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12649                 if (!v) {
12650                     return -TARGET_EFAULT;
12651                 }
12652             }
12653             n = lock_user_string(arg2);
12654             if (n) {
12655                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12656             } else {
12657                 ret = -TARGET_EFAULT;
12658             }
12659             unlock_user(n, arg2, 0);
12660             unlock_user(v, arg3, 0);
12661         }
12662         return ret;
12663     case TARGET_NR_getxattr:
12664     case TARGET_NR_lgetxattr:
12665         {
12666             void *n, *v = 0;
12667             if (arg3) {
12668                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12669                 if (!v) {
12670                     return -TARGET_EFAULT;
12671                 }
12672             }
12673             p = lock_user_string(arg1);
12674             n = lock_user_string(arg2);
12675             if (p && n) {
12676                 if (num == TARGET_NR_getxattr) {
12677                     ret = get_errno(getxattr(p, n, v, arg4));
12678                 } else {
12679                     ret = get_errno(lgetxattr(p, n, v, arg4));
12680                 }
12681             } else {
12682                 ret = -TARGET_EFAULT;
12683             }
12684             unlock_user(p, arg1, 0);
12685             unlock_user(n, arg2, 0);
12686             unlock_user(v, arg3, arg4);
12687         }
12688         return ret;
12689     case TARGET_NR_fgetxattr:
12690         {
12691             void *n, *v = 0;
12692             if (arg3) {
12693                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12694                 if (!v) {
12695                     return -TARGET_EFAULT;
12696                 }
12697             }
12698             n = lock_user_string(arg2);
12699             if (n) {
12700                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12701             } else {
12702                 ret = -TARGET_EFAULT;
12703             }
12704             unlock_user(n, arg2, 0);
12705             unlock_user(v, arg3, arg4);
12706         }
12707         return ret;
12708     case TARGET_NR_removexattr:
12709     case TARGET_NR_lremovexattr:
12710         {
12711             void *n;
12712             p = lock_user_string(arg1);
12713             n = lock_user_string(arg2);
12714             if (p && n) {
12715                 if (num == TARGET_NR_removexattr) {
12716                     ret = get_errno(removexattr(p, n));
12717                 } else {
12718                     ret = get_errno(lremovexattr(p, n));
12719                 }
12720             } else {
12721                 ret = -TARGET_EFAULT;
12722             }
12723             unlock_user(p, arg1, 0);
12724             unlock_user(n, arg2, 0);
12725         }
12726         return ret;
12727     case TARGET_NR_fremovexattr:
12728         {
12729             void *n;
12730             n = lock_user_string(arg2);
12731             if (n) {
12732                 ret = get_errno(fremovexattr(arg1, n));
12733             } else {
12734                 ret = -TARGET_EFAULT;
12735             }
12736             unlock_user(n, arg2, 0);
12737         }
12738         return ret;
12739 #endif
12740 #endif /* CONFIG_ATTR */
12741 #ifdef TARGET_NR_set_thread_area
12742     case TARGET_NR_set_thread_area:
12743 #if defined(TARGET_MIPS)
12744       cpu_env->active_tc.CP0_UserLocal = arg1;
12745       return 0;
12746 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12747       return do_set_thread_area(cpu_env, arg1);
12748 #elif defined(TARGET_M68K)
12749       {
12750           TaskState *ts = get_task_state(cpu);
12751           ts->tp_value = arg1;
12752           return 0;
12753       }
12754 #else
12755       return -TARGET_ENOSYS;
12756 #endif
12757 #endif
12758 #ifdef TARGET_NR_get_thread_area
12759     case TARGET_NR_get_thread_area:
12760 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12761         return do_get_thread_area(cpu_env, arg1);
12762 #elif defined(TARGET_M68K)
12763         {
12764             TaskState *ts = get_task_state(cpu);
12765             return ts->tp_value;
12766         }
12767 #else
12768         return -TARGET_ENOSYS;
12769 #endif
12770 #endif
12771 #ifdef TARGET_NR_getdomainname
12772     case TARGET_NR_getdomainname:
12773         return -TARGET_ENOSYS;
12774 #endif
12775 
12776 #ifdef TARGET_NR_clock_settime
12777     case TARGET_NR_clock_settime:
12778     {
12779         struct timespec ts;
12780 
12781         ret = target_to_host_timespec(&ts, arg2);
12782         if (!is_error(ret)) {
12783             ret = get_errno(clock_settime(arg1, &ts));
12784         }
12785         return ret;
12786     }
12787 #endif
12788 #ifdef TARGET_NR_clock_settime64
12789     case TARGET_NR_clock_settime64:
12790     {
12791         struct timespec ts;
12792 
12793         ret = target_to_host_timespec64(&ts, arg2);
12794         if (!is_error(ret)) {
12795             ret = get_errno(clock_settime(arg1, &ts));
12796         }
12797         return ret;
12798     }
12799 #endif
12800 #ifdef TARGET_NR_clock_gettime
12801     case TARGET_NR_clock_gettime:
12802     {
12803         struct timespec ts;
12804         ret = get_errno(clock_gettime(arg1, &ts));
12805         if (!is_error(ret)) {
12806             ret = host_to_target_timespec(arg2, &ts);
12807         }
12808         return ret;
12809     }
12810 #endif
12811 #ifdef TARGET_NR_clock_gettime64
12812     case TARGET_NR_clock_gettime64:
12813     {
12814         struct timespec ts;
12815         ret = get_errno(clock_gettime(arg1, &ts));
12816         if (!is_error(ret)) {
12817             ret = host_to_target_timespec64(arg2, &ts);
12818         }
12819         return ret;
12820     }
12821 #endif
12822 #ifdef TARGET_NR_clock_getres
12823     case TARGET_NR_clock_getres:
12824     {
12825         struct timespec ts;
12826         ret = get_errno(clock_getres(arg1, &ts));
12827         if (!is_error(ret)) {
12828             host_to_target_timespec(arg2, &ts);
12829         }
12830         return ret;
12831     }
12832 #endif
12833 #ifdef TARGET_NR_clock_getres_time64
12834     case TARGET_NR_clock_getres_time64:
12835     {
12836         struct timespec ts;
12837         ret = get_errno(clock_getres(arg1, &ts));
12838         if (!is_error(ret)) {
12839             host_to_target_timespec64(arg2, &ts);
12840         }
12841         return ret;
12842     }
12843 #endif
12844 #ifdef TARGET_NR_clock_nanosleep
12845     case TARGET_NR_clock_nanosleep:
12846     {
12847         struct timespec ts;
12848         if (target_to_host_timespec(&ts, arg3)) {
12849             return -TARGET_EFAULT;
12850         }
12851         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12852                                              &ts, arg4 ? &ts : NULL));
12853         /*
12854          * if the call is interrupted by a signal handler, it fails
12855          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12856          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12857          */
12858         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12859             host_to_target_timespec(arg4, &ts)) {
12860               return -TARGET_EFAULT;
12861         }
12862 
12863         return ret;
12864     }
12865 #endif
12866 #ifdef TARGET_NR_clock_nanosleep_time64
12867     case TARGET_NR_clock_nanosleep_time64:
12868     {
12869         struct timespec ts;
12870 
12871         if (target_to_host_timespec64(&ts, arg3)) {
12872             return -TARGET_EFAULT;
12873         }
12874 
12875         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12876                                              &ts, arg4 ? &ts : NULL));
12877 
12878         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12879             host_to_target_timespec64(arg4, &ts)) {
12880             return -TARGET_EFAULT;
12881         }
12882         return ret;
12883     }
12884 #endif
12885 
12886 #if defined(TARGET_NR_set_tid_address)
12887     case TARGET_NR_set_tid_address:
12888     {
12889         TaskState *ts = get_task_state(cpu);
12890         ts->child_tidptr = arg1;
12891         /* do not call host set_tid_address() syscall, instead return tid() */
12892         return get_errno(sys_gettid());
12893     }
12894 #endif
12895 
12896     case TARGET_NR_tkill:
12897         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12898 
12899     case TARGET_NR_tgkill:
12900         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12901                          target_to_host_signal(arg3)));
12902 
12903 #ifdef TARGET_NR_set_robust_list
12904     case TARGET_NR_set_robust_list:
12905     case TARGET_NR_get_robust_list:
12906         /* The ABI for supporting robust futexes has userspace pass
12907          * the kernel a pointer to a linked list which is updated by
12908          * userspace after the syscall; the list is walked by the kernel
12909          * when the thread exits. Since the linked list in QEMU guest
12910          * memory isn't a valid linked list for the host and we have
12911          * no way to reliably intercept the thread-death event, we can't
12912          * support these. Silently return ENOSYS so that guest userspace
12913          * falls back to a non-robust futex implementation (which should
12914          * be OK except in the corner case of the guest crashing while
12915          * holding a mutex that is shared with another process via
12916          * shared memory).
12917          */
12918         return -TARGET_ENOSYS;
12919 #endif
12920 
12921 #if defined(TARGET_NR_utimensat)
12922     case TARGET_NR_utimensat:
12923         {
12924             struct timespec *tsp, ts[2];
12925             if (!arg3) {
12926                 tsp = NULL;
12927             } else {
12928                 if (target_to_host_timespec(ts, arg3)) {
12929                     return -TARGET_EFAULT;
12930                 }
12931                 if (target_to_host_timespec(ts + 1, arg3 +
12932                                             sizeof(struct target_timespec))) {
12933                     return -TARGET_EFAULT;
12934                 }
12935                 tsp = ts;
12936             }
12937             if (!arg2)
12938                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12939             else {
12940                 if (!(p = lock_user_string(arg2))) {
12941                     return -TARGET_EFAULT;
12942                 }
12943                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12944                 unlock_user(p, arg2, 0);
12945             }
12946         }
12947         return ret;
12948 #endif
12949 #ifdef TARGET_NR_utimensat_time64
12950     case TARGET_NR_utimensat_time64:
12951         {
12952             struct timespec *tsp, ts[2];
12953             if (!arg3) {
12954                 tsp = NULL;
12955             } else {
12956                 if (target_to_host_timespec64(ts, arg3)) {
12957                     return -TARGET_EFAULT;
12958                 }
12959                 if (target_to_host_timespec64(ts + 1, arg3 +
12960                                      sizeof(struct target__kernel_timespec))) {
12961                     return -TARGET_EFAULT;
12962                 }
12963                 tsp = ts;
12964             }
12965             if (!arg2)
12966                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12967             else {
12968                 p = lock_user_string(arg2);
12969                 if (!p) {
12970                     return -TARGET_EFAULT;
12971                 }
12972                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12973                 unlock_user(p, arg2, 0);
12974             }
12975         }
12976         return ret;
12977 #endif
12978 #ifdef TARGET_NR_futex
12979     case TARGET_NR_futex:
12980         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12981 #endif
12982 #ifdef TARGET_NR_futex_time64
12983     case TARGET_NR_futex_time64:
12984         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12985 #endif
12986 #ifdef CONFIG_INOTIFY
12987 #if defined(TARGET_NR_inotify_init)
12988     case TARGET_NR_inotify_init:
12989         ret = get_errno(inotify_init());
12990         if (ret >= 0) {
12991             fd_trans_register(ret, &target_inotify_trans);
12992         }
12993         return ret;
12994 #endif
12995 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12996     case TARGET_NR_inotify_init1:
12997         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12998                                           fcntl_flags_tbl)));
12999         if (ret >= 0) {
13000             fd_trans_register(ret, &target_inotify_trans);
13001         }
13002         return ret;
13003 #endif
13004 #if defined(TARGET_NR_inotify_add_watch)
13005     case TARGET_NR_inotify_add_watch:
13006         p = lock_user_string(arg2);
13007         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13008         unlock_user(p, arg2, 0);
13009         return ret;
13010 #endif
13011 #if defined(TARGET_NR_inotify_rm_watch)
13012     case TARGET_NR_inotify_rm_watch:
13013         return get_errno(inotify_rm_watch(arg1, arg2));
13014 #endif
13015 #endif
13016 
13017 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13018     case TARGET_NR_mq_open:
13019         {
13020             struct mq_attr posix_mq_attr;
13021             struct mq_attr *pposix_mq_attr;
13022             int host_flags;
13023 
13024             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13025             pposix_mq_attr = NULL;
13026             if (arg4) {
13027                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13028                     return -TARGET_EFAULT;
13029                 }
13030                 pposix_mq_attr = &posix_mq_attr;
13031             }
13032             p = lock_user_string(arg1 - 1);
13033             if (!p) {
13034                 return -TARGET_EFAULT;
13035             }
13036             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13037             unlock_user (p, arg1, 0);
13038         }
13039         return ret;
13040 
13041     case TARGET_NR_mq_unlink:
13042         p = lock_user_string(arg1 - 1);
13043         if (!p) {
13044             return -TARGET_EFAULT;
13045         }
13046         ret = get_errno(mq_unlink(p));
13047         unlock_user (p, arg1, 0);
13048         return ret;
13049 
13050 #ifdef TARGET_NR_mq_timedsend
13051     case TARGET_NR_mq_timedsend:
13052         {
13053             struct timespec ts;
13054 
13055             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13056             if (arg5 != 0) {
13057                 if (target_to_host_timespec(&ts, arg5)) {
13058                     return -TARGET_EFAULT;
13059                 }
13060                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13061                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13062                     return -TARGET_EFAULT;
13063                 }
13064             } else {
13065                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13066             }
13067             unlock_user (p, arg2, arg3);
13068         }
13069         return ret;
13070 #endif
13071 #ifdef TARGET_NR_mq_timedsend_time64
13072     case TARGET_NR_mq_timedsend_time64:
13073         {
13074             struct timespec ts;
13075 
13076             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13077             if (arg5 != 0) {
13078                 if (target_to_host_timespec64(&ts, arg5)) {
13079                     return -TARGET_EFAULT;
13080                 }
13081                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13082                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13083                     return -TARGET_EFAULT;
13084                 }
13085             } else {
13086                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13087             }
13088             unlock_user(p, arg2, arg3);
13089         }
13090         return ret;
13091 #endif
13092 
13093 #ifdef TARGET_NR_mq_timedreceive
13094     case TARGET_NR_mq_timedreceive:
13095         {
13096             struct timespec ts;
13097             unsigned int prio;
13098 
13099             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13100             if (arg5 != 0) {
13101                 if (target_to_host_timespec(&ts, arg5)) {
13102                     return -TARGET_EFAULT;
13103                 }
13104                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13105                                                      &prio, &ts));
13106                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13107                     return -TARGET_EFAULT;
13108                 }
13109             } else {
13110                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13111                                                      &prio, NULL));
13112             }
13113             unlock_user (p, arg2, arg3);
13114             if (arg4 != 0)
13115                 put_user_u32(prio, arg4);
13116         }
13117         return ret;
13118 #endif
13119 #ifdef TARGET_NR_mq_timedreceive_time64
13120     case TARGET_NR_mq_timedreceive_time64:
13121         {
13122             struct timespec ts;
13123             unsigned int prio;
13124 
13125             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13126             if (arg5 != 0) {
13127                 if (target_to_host_timespec64(&ts, arg5)) {
13128                     return -TARGET_EFAULT;
13129                 }
13130                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13131                                                      &prio, &ts));
13132                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13133                     return -TARGET_EFAULT;
13134                 }
13135             } else {
13136                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13137                                                      &prio, NULL));
13138             }
13139             unlock_user(p, arg2, arg3);
13140             if (arg4 != 0) {
13141                 put_user_u32(prio, arg4);
13142             }
13143         }
13144         return ret;
13145 #endif
13146 
13147     /* Not implemented for now... */
13148 /*     case TARGET_NR_mq_notify: */
13149 /*         break; */
13150 
13151     case TARGET_NR_mq_getsetattr:
13152         {
13153             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13154             ret = 0;
13155             if (arg2 != 0) {
13156                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13157                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13158                                            &posix_mq_attr_out));
13159             } else if (arg3 != 0) {
13160                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13161             }
13162             if (ret == 0 && arg3 != 0) {
13163                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13164             }
13165         }
13166         return ret;
13167 #endif
13168 
13169 #ifdef CONFIG_SPLICE
13170 #ifdef TARGET_NR_tee
13171     case TARGET_NR_tee:
13172         {
13173             ret = get_errno(tee(arg1,arg2,arg3,arg4));
13174         }
13175         return ret;
13176 #endif
13177 #ifdef TARGET_NR_splice
13178     case TARGET_NR_splice:
13179         {
13180             loff_t loff_in, loff_out;
13181             loff_t *ploff_in = NULL, *ploff_out = NULL;
13182             if (arg2) {
13183                 if (get_user_u64(loff_in, arg2)) {
13184                     return -TARGET_EFAULT;
13185                 }
13186                 ploff_in = &loff_in;
13187             }
13188             if (arg4) {
13189                 if (get_user_u64(loff_out, arg4)) {
13190                     return -TARGET_EFAULT;
13191                 }
13192                 ploff_out = &loff_out;
13193             }
13194             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13195             if (arg2) {
13196                 if (put_user_u64(loff_in, arg2)) {
13197                     return -TARGET_EFAULT;
13198                 }
13199             }
13200             if (arg4) {
13201                 if (put_user_u64(loff_out, arg4)) {
13202                     return -TARGET_EFAULT;
13203                 }
13204             }
13205         }
13206         return ret;
13207 #endif
13208 #ifdef TARGET_NR_vmsplice
13209 	case TARGET_NR_vmsplice:
13210         {
13211             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13212             if (vec != NULL) {
13213                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13214                 unlock_iovec(vec, arg2, arg3, 0);
13215             } else {
13216                 ret = -host_to_target_errno(errno);
13217             }
13218         }
13219         return ret;
13220 #endif
13221 #endif /* CONFIG_SPLICE */
13222 #ifdef CONFIG_EVENTFD
13223 #if defined(TARGET_NR_eventfd)
13224     case TARGET_NR_eventfd:
13225         ret = get_errno(eventfd(arg1, 0));
13226         if (ret >= 0) {
13227             fd_trans_register(ret, &target_eventfd_trans);
13228         }
13229         return ret;
13230 #endif
13231 #if defined(TARGET_NR_eventfd2)
13232     case TARGET_NR_eventfd2:
13233     {
13234         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13235         if (arg2 & TARGET_O_NONBLOCK) {
13236             host_flags |= O_NONBLOCK;
13237         }
13238         if (arg2 & TARGET_O_CLOEXEC) {
13239             host_flags |= O_CLOEXEC;
13240         }
13241         ret = get_errno(eventfd(arg1, host_flags));
13242         if (ret >= 0) {
13243             fd_trans_register(ret, &target_eventfd_trans);
13244         }
13245         return ret;
13246     }
13247 #endif
13248 #endif /* CONFIG_EVENTFD  */
13249 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13250     case TARGET_NR_fallocate:
13251 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13252         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13253                                   target_offset64(arg5, arg6)));
13254 #else
13255         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13256 #endif
13257         return ret;
13258 #endif
13259 #if defined(CONFIG_SYNC_FILE_RANGE)
13260 #if defined(TARGET_NR_sync_file_range)
13261     case TARGET_NR_sync_file_range:
13262 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13263 #if defined(TARGET_MIPS)
13264         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13265                                         target_offset64(arg5, arg6), arg7));
13266 #else
13267         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13268                                         target_offset64(arg4, arg5), arg6));
13269 #endif /* !TARGET_MIPS */
13270 #else
13271         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13272 #endif
13273         return ret;
13274 #endif
13275 #if defined(TARGET_NR_sync_file_range2) || \
13276     defined(TARGET_NR_arm_sync_file_range)
13277 #if defined(TARGET_NR_sync_file_range2)
13278     case TARGET_NR_sync_file_range2:
13279 #endif
13280 #if defined(TARGET_NR_arm_sync_file_range)
13281     case TARGET_NR_arm_sync_file_range:
13282 #endif
13283         /* This is like sync_file_range but the arguments are reordered */
13284 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13285         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13286                                         target_offset64(arg5, arg6), arg2));
13287 #else
13288         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13289 #endif
13290         return ret;
13291 #endif
13292 #endif
13293 #if defined(TARGET_NR_signalfd4)
13294     case TARGET_NR_signalfd4:
13295         return do_signalfd4(arg1, arg2, arg4);
13296 #endif
13297 #if defined(TARGET_NR_signalfd)
13298     case TARGET_NR_signalfd:
13299         return do_signalfd4(arg1, arg2, 0);
13300 #endif
13301 #if defined(CONFIG_EPOLL)
13302 #if defined(TARGET_NR_epoll_create)
13303     case TARGET_NR_epoll_create:
13304         return get_errno(epoll_create(arg1));
13305 #endif
13306 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13307     case TARGET_NR_epoll_create1:
13308         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13309 #endif
13310 #if defined(TARGET_NR_epoll_ctl)
13311     case TARGET_NR_epoll_ctl:
13312     {
13313         struct epoll_event ep;
13314         struct epoll_event *epp = 0;
13315         if (arg4) {
13316             if (arg2 != EPOLL_CTL_DEL) {
13317                 struct target_epoll_event *target_ep;
13318                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13319                     return -TARGET_EFAULT;
13320                 }
13321                 ep.events = tswap32(target_ep->events);
13322                 /*
13323                  * The epoll_data_t union is just opaque data to the kernel,
13324                  * so we transfer all 64 bits across and need not worry what
13325                  * actual data type it is.
13326                  */
13327                 ep.data.u64 = tswap64(target_ep->data.u64);
13328                 unlock_user_struct(target_ep, arg4, 0);
13329             }
13330             /*
13331              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13332              * non-null pointer, even though this argument is ignored.
13333              *
13334              */
13335             epp = &ep;
13336         }
13337         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13338     }
13339 #endif
13340 
13341 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13342 #if defined(TARGET_NR_epoll_wait)
13343     case TARGET_NR_epoll_wait:
13344 #endif
13345 #if defined(TARGET_NR_epoll_pwait)
13346     case TARGET_NR_epoll_pwait:
13347 #endif
13348     {
13349         struct target_epoll_event *target_ep;
13350         struct epoll_event *ep;
13351         int epfd = arg1;
13352         int maxevents = arg3;
13353         int timeout = arg4;
13354 
13355         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13356             return -TARGET_EINVAL;
13357         }
13358 
13359         target_ep = lock_user(VERIFY_WRITE, arg2,
13360                               maxevents * sizeof(struct target_epoll_event), 1);
13361         if (!target_ep) {
13362             return -TARGET_EFAULT;
13363         }
13364 
13365         ep = g_try_new(struct epoll_event, maxevents);
13366         if (!ep) {
13367             unlock_user(target_ep, arg2, 0);
13368             return -TARGET_ENOMEM;
13369         }
13370 
13371         switch (num) {
13372 #if defined(TARGET_NR_epoll_pwait)
13373         case TARGET_NR_epoll_pwait:
13374         {
13375             sigset_t *set = NULL;
13376 
13377             if (arg5) {
13378                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13379                 if (ret != 0) {
13380                     break;
13381                 }
13382             }
13383 
13384             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13385                                              set, SIGSET_T_SIZE));
13386 
13387             if (set) {
13388                 finish_sigsuspend_mask(ret);
13389             }
13390             break;
13391         }
13392 #endif
13393 #if defined(TARGET_NR_epoll_wait)
13394         case TARGET_NR_epoll_wait:
13395             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13396                                              NULL, 0));
13397             break;
13398 #endif
13399         default:
13400             ret = -TARGET_ENOSYS;
13401         }
13402         if (!is_error(ret)) {
13403             int i;
13404             for (i = 0; i < ret; i++) {
13405                 target_ep[i].events = tswap32(ep[i].events);
13406                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13407             }
13408             unlock_user(target_ep, arg2,
13409                         ret * sizeof(struct target_epoll_event));
13410         } else {
13411             unlock_user(target_ep, arg2, 0);
13412         }
13413         g_free(ep);
13414         return ret;
13415     }
13416 #endif
13417 #endif
13418 #ifdef TARGET_NR_prlimit64
13419     case TARGET_NR_prlimit64:
13420     {
13421         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13422         struct target_rlimit64 *target_rnew, *target_rold;
13423         struct host_rlimit64 rnew, rold, *rnewp = 0;
13424         int resource = target_to_host_resource(arg2);
13425 
13426         if (arg3 && (resource != RLIMIT_AS &&
13427                      resource != RLIMIT_DATA &&
13428                      resource != RLIMIT_STACK)) {
13429             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13430                 return -TARGET_EFAULT;
13431             }
13432             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13433             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13434             unlock_user_struct(target_rnew, arg3, 0);
13435             rnewp = &rnew;
13436         }
13437 
13438         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13439         if (!is_error(ret) && arg4) {
13440             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13441                 return -TARGET_EFAULT;
13442             }
13443             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13444             __put_user(rold.rlim_max, &target_rold->rlim_max);
13445             unlock_user_struct(target_rold, arg4, 1);
13446         }
13447         return ret;
13448     }
13449 #endif
13450 #ifdef TARGET_NR_gethostname
13451     case TARGET_NR_gethostname:
13452     {
13453         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13454         if (name) {
13455             ret = get_errno(gethostname(name, arg2));
13456             unlock_user(name, arg1, arg2);
13457         } else {
13458             ret = -TARGET_EFAULT;
13459         }
13460         return ret;
13461     }
13462 #endif
13463 #ifdef TARGET_NR_atomic_cmpxchg_32
13464     case TARGET_NR_atomic_cmpxchg_32:
13465     {
13466         /* should use start_exclusive from main.c */
13467         abi_ulong mem_value;
13468         if (get_user_u32(mem_value, arg6)) {
13469             target_siginfo_t info;
13470             info.si_signo = SIGSEGV;
13471             info.si_errno = 0;
13472             info.si_code = TARGET_SEGV_MAPERR;
13473             info._sifields._sigfault._addr = arg6;
13474             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13475             ret = 0xdeadbeef;
13476 
13477         }
13478         if (mem_value == arg2)
13479             put_user_u32(arg1, arg6);
13480         return mem_value;
13481     }
13482 #endif
13483 #ifdef TARGET_NR_atomic_barrier
13484     case TARGET_NR_atomic_barrier:
13485         /* Like the kernel implementation and the
13486            qemu arm barrier, no-op this? */
13487         return 0;
13488 #endif
13489 
13490 #ifdef TARGET_NR_timer_create
13491     case TARGET_NR_timer_create:
13492     {
13493         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13494 
13495         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13496 
13497         int clkid = arg1;
13498         int timer_index = next_free_host_timer();
13499 
13500         if (timer_index < 0) {
13501             ret = -TARGET_EAGAIN;
13502         } else {
13503             timer_t *phtimer = g_posix_timers  + timer_index;
13504 
13505             if (arg2) {
13506                 phost_sevp = &host_sevp;
13507                 ret = target_to_host_sigevent(phost_sevp, arg2);
13508                 if (ret != 0) {
13509                     free_host_timer_slot(timer_index);
13510                     return ret;
13511                 }
13512             }
13513 
13514             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13515             if (ret) {
13516                 free_host_timer_slot(timer_index);
13517             } else {
13518                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13519                     timer_delete(*phtimer);
13520                     free_host_timer_slot(timer_index);
13521                     return -TARGET_EFAULT;
13522                 }
13523             }
13524         }
13525         return ret;
13526     }
13527 #endif
13528 
13529 #ifdef TARGET_NR_timer_settime
13530     case TARGET_NR_timer_settime:
13531     {
13532         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13533          * struct itimerspec * old_value */
13534         target_timer_t timerid = get_timer_id(arg1);
13535 
13536         if (timerid < 0) {
13537             ret = timerid;
13538         } else if (arg3 == 0) {
13539             ret = -TARGET_EINVAL;
13540         } else {
13541             timer_t htimer = g_posix_timers[timerid];
13542             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13543 
13544             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13545                 return -TARGET_EFAULT;
13546             }
13547             ret = get_errno(
13548                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13549             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13550                 return -TARGET_EFAULT;
13551             }
13552         }
13553         return ret;
13554     }
13555 #endif
13556 
13557 #ifdef TARGET_NR_timer_settime64
13558     case TARGET_NR_timer_settime64:
13559     {
13560         target_timer_t timerid = get_timer_id(arg1);
13561 
13562         if (timerid < 0) {
13563             ret = timerid;
13564         } else if (arg3 == 0) {
13565             ret = -TARGET_EINVAL;
13566         } else {
13567             timer_t htimer = g_posix_timers[timerid];
13568             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13569 
13570             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13571                 return -TARGET_EFAULT;
13572             }
13573             ret = get_errno(
13574                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13575             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13576                 return -TARGET_EFAULT;
13577             }
13578         }
13579         return ret;
13580     }
13581 #endif
13582 
13583 #ifdef TARGET_NR_timer_gettime
13584     case TARGET_NR_timer_gettime:
13585     {
13586         /* args: timer_t timerid, struct itimerspec *curr_value */
13587         target_timer_t timerid = get_timer_id(arg1);
13588 
13589         if (timerid < 0) {
13590             ret = timerid;
13591         } else if (!arg2) {
13592             ret = -TARGET_EFAULT;
13593         } else {
13594             timer_t htimer = g_posix_timers[timerid];
13595             struct itimerspec hspec;
13596             ret = get_errno(timer_gettime(htimer, &hspec));
13597 
13598             if (host_to_target_itimerspec(arg2, &hspec)) {
13599                 ret = -TARGET_EFAULT;
13600             }
13601         }
13602         return ret;
13603     }
13604 #endif
13605 
13606 #ifdef TARGET_NR_timer_gettime64
13607     case TARGET_NR_timer_gettime64:
13608     {
13609         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13610         target_timer_t timerid = get_timer_id(arg1);
13611 
13612         if (timerid < 0) {
13613             ret = timerid;
13614         } else if (!arg2) {
13615             ret = -TARGET_EFAULT;
13616         } else {
13617             timer_t htimer = g_posix_timers[timerid];
13618             struct itimerspec hspec;
13619             ret = get_errno(timer_gettime(htimer, &hspec));
13620 
13621             if (host_to_target_itimerspec64(arg2, &hspec)) {
13622                 ret = -TARGET_EFAULT;
13623             }
13624         }
13625         return ret;
13626     }
13627 #endif
13628 
13629 #ifdef TARGET_NR_timer_getoverrun
13630     case TARGET_NR_timer_getoverrun:
13631     {
13632         /* args: timer_t timerid */
13633         target_timer_t timerid = get_timer_id(arg1);
13634 
13635         if (timerid < 0) {
13636             ret = timerid;
13637         } else {
13638             timer_t htimer = g_posix_timers[timerid];
13639             ret = get_errno(timer_getoverrun(htimer));
13640         }
13641         return ret;
13642     }
13643 #endif
13644 
13645 #ifdef TARGET_NR_timer_delete
13646     case TARGET_NR_timer_delete:
13647     {
13648         /* args: timer_t timerid */
13649         target_timer_t timerid = get_timer_id(arg1);
13650 
13651         if (timerid < 0) {
13652             ret = timerid;
13653         } else {
13654             timer_t htimer = g_posix_timers[timerid];
13655             ret = get_errno(timer_delete(htimer));
13656             free_host_timer_slot(timerid);
13657         }
13658         return ret;
13659     }
13660 #endif
13661 
13662 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13663     case TARGET_NR_timerfd_create:
13664         ret = get_errno(timerfd_create(arg1,
13665                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13666         if (ret >= 0) {
13667             fd_trans_register(ret, &target_timerfd_trans);
13668         }
13669         return ret;
13670 #endif
13671 
13672 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13673     case TARGET_NR_timerfd_gettime:
13674         {
13675             struct itimerspec its_curr;
13676 
13677             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13678 
13679             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13680                 return -TARGET_EFAULT;
13681             }
13682         }
13683         return ret;
13684 #endif
13685 
13686 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13687     case TARGET_NR_timerfd_gettime64:
13688         {
13689             struct itimerspec its_curr;
13690 
13691             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13692 
13693             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13694                 return -TARGET_EFAULT;
13695             }
13696         }
13697         return ret;
13698 #endif
13699 
13700 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13701     case TARGET_NR_timerfd_settime:
13702         {
13703             struct itimerspec its_new, its_old, *p_new;
13704 
13705             if (arg3) {
13706                 if (target_to_host_itimerspec(&its_new, arg3)) {
13707                     return -TARGET_EFAULT;
13708                 }
13709                 p_new = &its_new;
13710             } else {
13711                 p_new = NULL;
13712             }
13713 
13714             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13715 
13716             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13717                 return -TARGET_EFAULT;
13718             }
13719         }
13720         return ret;
13721 #endif
13722 
13723 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13724     case TARGET_NR_timerfd_settime64:
13725         {
13726             struct itimerspec its_new, its_old, *p_new;
13727 
13728             if (arg3) {
13729                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13730                     return -TARGET_EFAULT;
13731                 }
13732                 p_new = &its_new;
13733             } else {
13734                 p_new = NULL;
13735             }
13736 
13737             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13738 
13739             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13740                 return -TARGET_EFAULT;
13741             }
13742         }
13743         return ret;
13744 #endif
13745 
13746 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13747     case TARGET_NR_ioprio_get:
13748         return get_errno(ioprio_get(arg1, arg2));
13749 #endif
13750 
13751 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13752     case TARGET_NR_ioprio_set:
13753         return get_errno(ioprio_set(arg1, arg2, arg3));
13754 #endif
13755 
13756 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13757     case TARGET_NR_setns:
13758         return get_errno(setns(arg1, arg2));
13759 #endif
13760 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13761     case TARGET_NR_unshare:
13762         return get_errno(unshare(arg1));
13763 #endif
13764 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13765     case TARGET_NR_kcmp:
13766         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13767 #endif
13768 #ifdef TARGET_NR_swapcontext
13769     case TARGET_NR_swapcontext:
13770         /* PowerPC specific.  */
13771         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13772 #endif
13773 #ifdef TARGET_NR_memfd_create
13774     case TARGET_NR_memfd_create:
13775         p = lock_user_string(arg1);
13776         if (!p) {
13777             return -TARGET_EFAULT;
13778         }
13779         ret = get_errno(memfd_create(p, arg2));
13780         fd_trans_unregister(ret);
13781         unlock_user(p, arg1, 0);
13782         return ret;
13783 #endif
13784 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13785     case TARGET_NR_membarrier:
13786         return get_errno(membarrier(arg1, arg2));
13787 #endif
13788 
13789 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13790     case TARGET_NR_copy_file_range:
13791         {
13792             loff_t inoff, outoff;
13793             loff_t *pinoff = NULL, *poutoff = NULL;
13794 
13795             if (arg2) {
13796                 if (get_user_u64(inoff, arg2)) {
13797                     return -TARGET_EFAULT;
13798                 }
13799                 pinoff = &inoff;
13800             }
13801             if (arg4) {
13802                 if (get_user_u64(outoff, arg4)) {
13803                     return -TARGET_EFAULT;
13804                 }
13805                 poutoff = &outoff;
13806             }
13807             /* Do not sign-extend the count parameter. */
13808             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13809                                                  (abi_ulong)arg5, arg6));
13810             if (!is_error(ret) && ret > 0) {
13811                 if (arg2) {
13812                     if (put_user_u64(inoff, arg2)) {
13813                         return -TARGET_EFAULT;
13814                     }
13815                 }
13816                 if (arg4) {
13817                     if (put_user_u64(outoff, arg4)) {
13818                         return -TARGET_EFAULT;
13819                     }
13820                 }
13821             }
13822         }
13823         return ret;
13824 #endif
13825 
13826 #if defined(TARGET_NR_pivot_root)
13827     case TARGET_NR_pivot_root:
13828         {
13829             void *p2;
13830             p = lock_user_string(arg1); /* new_root */
13831             p2 = lock_user_string(arg2); /* put_old */
13832             if (!p || !p2) {
13833                 ret = -TARGET_EFAULT;
13834             } else {
13835                 ret = get_errno(pivot_root(p, p2));
13836             }
13837             unlock_user(p2, arg2, 0);
13838             unlock_user(p, arg1, 0);
13839         }
13840         return ret;
13841 #endif
13842 
13843 #if defined(TARGET_NR_riscv_hwprobe)
13844     case TARGET_NR_riscv_hwprobe:
13845         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13846 #endif
13847 
13848     default:
13849         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13850         return -TARGET_ENOSYS;
13851     }
13852     return ret;
13853 }
13854 
do_syscall(CPUArchState * cpu_env,int num,abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5,abi_long arg6,abi_long arg7,abi_long arg8)13855 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13856                     abi_long arg2, abi_long arg3, abi_long arg4,
13857                     abi_long arg5, abi_long arg6, abi_long arg7,
13858                     abi_long arg8)
13859 {
13860     CPUState *cpu = env_cpu(cpu_env);
13861     abi_long ret;
13862 
13863 #ifdef DEBUG_ERESTARTSYS
13864     /* Debug-only code for exercising the syscall-restart code paths
13865      * in the per-architecture cpu main loops: restart every syscall
13866      * the guest makes once before letting it through.
13867      */
13868     {
13869         static bool flag;
13870         flag = !flag;
13871         if (flag) {
13872             return -QEMU_ERESTARTSYS;
13873         }
13874     }
13875 #endif
13876 
13877     record_syscall_start(cpu, num, arg1,
13878                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13879 
13880     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13881         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13882     }
13883 
13884     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13885                       arg5, arg6, arg7, arg8);
13886 
13887     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13888         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13889                           arg3, arg4, arg5, arg6);
13890     }
13891 
13892     record_syscall_return(cpu, num, ret);
13893     return ret;
13894 }
13895