xref: /openbmc/qemu/linux-user/syscall.c (revision adaec191)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special-cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
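/* Illustrative sketch (added for exposition, not part of the original file):
 * one way the masks above can be used to classify a clone() request. The
 * authoritative checks live in do_fork() later in this file and may differ
 * in detail; the helper names below are invented for illustration only.
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* thread creation: all of the thread flags, and nothing unsupported */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}

static inline bool clone_flags_look_like_fork(unsigned int flags)
{
    /* fork: none of the thread flags, and nothing unsupported */
    return (flags & CLONE_THREAD_FLAGS) == 0 &&
           (flags & CLONE_INVALID_FORK_FLAGS) == 0;
}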
165 
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
229 
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257    errno. */
258 static int gettid(void) {
259     return -ENOSYS;
260 }
261 #endif
262 
263 /* For the 64-bit guest on 32-bit host case we must emulate
264  * getdents using getdents64, because otherwise the host
265  * might hand us back more dirent records than we can fit
266  * into the guest buffer after structure format conversion.
267  * Otherwise we emulate the target getdents with the host getdents if the host has it.
268  */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
272 
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
283           loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287           siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297           const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308           void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310           struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
322 
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325           unsigned long, idx1, unsigned long, idx2)
326 #endif
327 
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
330   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
331   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
332   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
333   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
334   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
335   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
336   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
337   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
338   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
339   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
340   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
342 #if defined(O_DIRECT)
343   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
344 #endif
345 #if defined(O_NOATIME)
346   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
347 #endif
348 #if defined(O_CLOEXEC)
349   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
350 #endif
351 #if defined(O_PATH)
352   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
353 #endif
354 #if defined(O_TMPFILE)
355   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
356 #endif
357   /* Don't terminate the list prematurely on 64-bit host+guest.  */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361   { 0, 0, 0, 0 }
362 };
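/* Usage sketch (assumption: the generic bitmask helpers declared in qemu.h
 * walk tables of this shape): each row maps a (mask, bits) pair on the
 * target side to the equivalent pair on the host side, and the all-zero row
 * terminates the walk, so open()/fcntl() emulation can convert flags with
 * something like:
 *
 *     host_flags   = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 */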
363 
364 enum {
365     QEMU_IFLA_BR_UNSPEC,
366     QEMU_IFLA_BR_FORWARD_DELAY,
367     QEMU_IFLA_BR_HELLO_TIME,
368     QEMU_IFLA_BR_MAX_AGE,
369     QEMU_IFLA_BR_AGEING_TIME,
370     QEMU_IFLA_BR_STP_STATE,
371     QEMU_IFLA_BR_PRIORITY,
372     QEMU_IFLA_BR_VLAN_FILTERING,
373     QEMU_IFLA_BR_VLAN_PROTOCOL,
374     QEMU_IFLA_BR_GROUP_FWD_MASK,
375     QEMU_IFLA_BR_ROOT_ID,
376     QEMU_IFLA_BR_BRIDGE_ID,
377     QEMU_IFLA_BR_ROOT_PORT,
378     QEMU_IFLA_BR_ROOT_PATH_COST,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381     QEMU_IFLA_BR_HELLO_TIMER,
382     QEMU_IFLA_BR_TCN_TIMER,
383     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384     QEMU_IFLA_BR_GC_TIMER,
385     QEMU_IFLA_BR_GROUP_ADDR,
386     QEMU_IFLA_BR_FDB_FLUSH,
387     QEMU_IFLA_BR_MCAST_ROUTER,
388     QEMU_IFLA_BR_MCAST_SNOOPING,
389     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390     QEMU_IFLA_BR_MCAST_QUERIER,
391     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392     QEMU_IFLA_BR_MCAST_HASH_MAX,
393     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401     QEMU_IFLA_BR_NF_CALL_IPTABLES,
402     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405     QEMU_IFLA_BR_PAD,
406     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409     QEMU_IFLA_BR_MCAST_MLD_VERSION,
410     QEMU___IFLA_BR_MAX,
411 };
412 
413 enum {
414     QEMU_IFLA_UNSPEC,
415     QEMU_IFLA_ADDRESS,
416     QEMU_IFLA_BROADCAST,
417     QEMU_IFLA_IFNAME,
418     QEMU_IFLA_MTU,
419     QEMU_IFLA_LINK,
420     QEMU_IFLA_QDISC,
421     QEMU_IFLA_STATS,
422     QEMU_IFLA_COST,
423     QEMU_IFLA_PRIORITY,
424     QEMU_IFLA_MASTER,
425     QEMU_IFLA_WIRELESS,
426     QEMU_IFLA_PROTINFO,
427     QEMU_IFLA_TXQLEN,
428     QEMU_IFLA_MAP,
429     QEMU_IFLA_WEIGHT,
430     QEMU_IFLA_OPERSTATE,
431     QEMU_IFLA_LINKMODE,
432     QEMU_IFLA_LINKINFO,
433     QEMU_IFLA_NET_NS_PID,
434     QEMU_IFLA_IFALIAS,
435     QEMU_IFLA_NUM_VF,
436     QEMU_IFLA_VFINFO_LIST,
437     QEMU_IFLA_STATS64,
438     QEMU_IFLA_VF_PORTS,
439     QEMU_IFLA_PORT_SELF,
440     QEMU_IFLA_AF_SPEC,
441     QEMU_IFLA_GROUP,
442     QEMU_IFLA_NET_NS_FD,
443     QEMU_IFLA_EXT_MASK,
444     QEMU_IFLA_PROMISCUITY,
445     QEMU_IFLA_NUM_TX_QUEUES,
446     QEMU_IFLA_NUM_RX_QUEUES,
447     QEMU_IFLA_CARRIER,
448     QEMU_IFLA_PHYS_PORT_ID,
449     QEMU_IFLA_CARRIER_CHANGES,
450     QEMU_IFLA_PHYS_SWITCH_ID,
451     QEMU_IFLA_LINK_NETNSID,
452     QEMU_IFLA_PHYS_PORT_NAME,
453     QEMU_IFLA_PROTO_DOWN,
454     QEMU_IFLA_GSO_MAX_SEGS,
455     QEMU_IFLA_GSO_MAX_SIZE,
456     QEMU_IFLA_PAD,
457     QEMU_IFLA_XDP,
458     QEMU_IFLA_EVENT,
459     QEMU_IFLA_NEW_NETNSID,
460     QEMU_IFLA_IF_NETNSID,
461     QEMU_IFLA_CARRIER_UP_COUNT,
462     QEMU_IFLA_CARRIER_DOWN_COUNT,
463     QEMU_IFLA_NEW_IFINDEX,
464     QEMU___IFLA_MAX
465 };
466 
467 enum {
468     QEMU_IFLA_BRPORT_UNSPEC,
469     QEMU_IFLA_BRPORT_STATE,
470     QEMU_IFLA_BRPORT_PRIORITY,
471     QEMU_IFLA_BRPORT_COST,
472     QEMU_IFLA_BRPORT_MODE,
473     QEMU_IFLA_BRPORT_GUARD,
474     QEMU_IFLA_BRPORT_PROTECT,
475     QEMU_IFLA_BRPORT_FAST_LEAVE,
476     QEMU_IFLA_BRPORT_LEARNING,
477     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478     QEMU_IFLA_BRPORT_PROXYARP,
479     QEMU_IFLA_BRPORT_LEARNING_SYNC,
480     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481     QEMU_IFLA_BRPORT_ROOT_ID,
482     QEMU_IFLA_BRPORT_BRIDGE_ID,
483     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484     QEMU_IFLA_BRPORT_DESIGNATED_COST,
485     QEMU_IFLA_BRPORT_ID,
486     QEMU_IFLA_BRPORT_NO,
487     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488     QEMU_IFLA_BRPORT_CONFIG_PENDING,
489     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491     QEMU_IFLA_BRPORT_HOLD_TIMER,
492     QEMU_IFLA_BRPORT_FLUSH,
493     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494     QEMU_IFLA_BRPORT_PAD,
495     QEMU_IFLA_BRPORT_MCAST_FLOOD,
496     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498     QEMU_IFLA_BRPORT_BCAST_FLOOD,
499     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501     QEMU___IFLA_BRPORT_MAX
502 };
503 
504 enum {
505     QEMU_IFLA_INFO_UNSPEC,
506     QEMU_IFLA_INFO_KIND,
507     QEMU_IFLA_INFO_DATA,
508     QEMU_IFLA_INFO_XSTATS,
509     QEMU_IFLA_INFO_SLAVE_KIND,
510     QEMU_IFLA_INFO_SLAVE_DATA,
511     QEMU___IFLA_INFO_MAX,
512 };
513 
514 enum {
515     QEMU_IFLA_INET_UNSPEC,
516     QEMU_IFLA_INET_CONF,
517     QEMU___IFLA_INET_MAX,
518 };
519 
520 enum {
521     QEMU_IFLA_INET6_UNSPEC,
522     QEMU_IFLA_INET6_FLAGS,
523     QEMU_IFLA_INET6_CONF,
524     QEMU_IFLA_INET6_STATS,
525     QEMU_IFLA_INET6_MCAST,
526     QEMU_IFLA_INET6_CACHEINFO,
527     QEMU_IFLA_INET6_ICMP6STATS,
528     QEMU_IFLA_INET6_TOKEN,
529     QEMU_IFLA_INET6_ADDR_GEN_MODE,
530     QEMU___IFLA_INET6_MAX
531 };
532 
533 enum {
534     QEMU_IFLA_XDP_UNSPEC,
535     QEMU_IFLA_XDP_FD,
536     QEMU_IFLA_XDP_ATTACHED,
537     QEMU_IFLA_XDP_FLAGS,
538     QEMU_IFLA_XDP_PROG_ID,
539     QEMU___IFLA_XDP_MAX,
540 };
541 
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545     TargetFdDataFunc host_to_target_data;
546     TargetFdDataFunc target_to_host_data;
547     TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
549 
550 static TargetFdTrans **target_fd_trans;
551 
552 static unsigned int target_fd_max;
553 
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
555 {
556     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557         return target_fd_trans[fd]->target_to_host_data;
558     }
559     return NULL;
560 }
561 
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
563 {
564     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565         return target_fd_trans[fd]->host_to_target_data;
566     }
567     return NULL;
568 }
569 
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
571 {
572     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573         return target_fd_trans[fd]->target_to_host_addr;
574     }
575     return NULL;
576 }
577 
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
579 {
580     unsigned int oldmax;
581 
582     if (fd >= target_fd_max) {
583         oldmax = target_fd_max;
584         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585         target_fd_trans = g_renew(TargetFdTrans *,
586                                   target_fd_trans, target_fd_max);
587         memset((void *)(target_fd_trans + oldmax), 0,
588                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
589     }
590     target_fd_trans[fd] = trans;
591 }
592 
593 static void fd_trans_unregister(int fd)
594 {
595     if (fd >= 0 && fd < target_fd_max) {
596         target_fd_trans[fd] = NULL;
597     }
598 }
599 
600 static void fd_trans_dup(int oldfd, int newfd)
601 {
602     fd_trans_unregister(newfd);
603     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604         fd_trans_register(newfd, target_fd_trans[oldfd]);
605     }
606 }
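/* Usage sketch (assumption: concrete TargetFdTrans instances such as a
 * signalfd or netlink translator are defined further down in this file):
 * a syscall that creates a "special" fd registers a translator so that
 * later data transfers on that fd are converted between guest and host
 * layout, e.g.
 *
 *     ret = get_errno(signalfd(fd, &host_mask, host_flags));
 *     if (ret >= 0) {
 *         fd_trans_register(ret, &target_signalfd_trans);
 *     }
 *
 * while close()/dup() paths call fd_trans_unregister()/fd_trans_dup() to
 * keep target_fd_trans[] consistent.
 */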
607 
608 static int sys_getcwd1(char *buf, size_t size)
609 {
610   if (getcwd(buf, size) == NULL) {
611       /* getcwd() sets errno */
612       return (-1);
613   }
614   return strlen(buf)+1;
615 }
616 
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
621           const struct timespec *,tsp,int,flags)
622 #else
623 static int sys_utimensat(int dirfd, const char *pathname,
624                          const struct timespec times[2], int flags)
625 {
626     errno = ENOSYS;
627     return -1;
628 }
629 #endif
630 #endif /* TARGET_NR_utimensat */
631 
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
636           const char *, new, unsigned int, flags)
637 #else
638 static int sys_renameat2(int oldfd, const char *old,
639                          int newfd, const char *new, int flags)
640 {
641     if (flags == 0) {
642         return renameat(oldfd, old, newfd, new);
643     }
644     errno = ENOSYS;
645     return -1;
646 }
647 #endif
648 #endif /* TARGET_NR_renameat2 */
649 
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
652 
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
654 static int sys_inotify_init(void)
655 {
656   return (inotify_init());
657 }
658 #endif
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
660 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
661 {
662   return (inotify_add_watch(fd, pathname, mask));
663 }
664 #endif
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
666 static int sys_inotify_rm_watch(int fd, int32_t wd)
667 {
668   return (inotify_rm_watch(fd, wd));
669 }
670 #endif
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
673 static int sys_inotify_init1(int flags)
674 {
675   return (inotify_init1(flags));
676 }
677 #endif
678 #endif
679 #else
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY  */
686 
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64 {
694     uint64_t rlim_cur;
695     uint64_t rlim_max;
696 };
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698           const struct host_rlimit64 *, new_limit,
699           struct host_rlimit64 *, old_limit)
700 #endif
701 
702 
703 #if defined(TARGET_NR_timer_create)
704 /* Maximum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
706 
707 static inline int next_free_host_timer(void)
708 {
709     int k ;
710     /* FIXME: Does finding the next free slot require a lock? */
711     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
712         if (g_posix_timers[k] == 0) {
713             g_posix_timers[k] = (timer_t) 1;
714             return k;
715         }
716     }
717     return -1;
718 }
719 #endif
720 
721 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
722 #ifdef TARGET_ARM
723 static inline int regpairs_aligned(void *cpu_env, int num)
724 {
725     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
726 }
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
731  * of registers which translates to the same as ARM/MIPS, because we start with
732  * r3 as arg1 */
733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env, int num)
737 {
738     switch (num) {
739     case TARGET_NR_pread64:
740     case TARGET_NR_pwrite64:
741         return 1;
742 
743     default:
744         return 0;
745     }
746 }
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
749 #else
750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
751 #endif
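/* Usage sketch (assumption: mirrors the pread64/pwrite64 handling later in
 * this file, which uses a target_offset64() helper): when regpairs_aligned()
 * returns 1 the 64-bit argument starts on the next even register, so one
 * argument slot is skipped before the low/high words are combined, e.g.
 *
 *     if (regpairs_aligned(cpu_env, TARGET_NR_pread64)) {
 *         arg4 = arg5;
 *         arg5 = arg6;
 *     }
 *     ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
 */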
752 
753 #define ERRNO_TABLE_SIZE 1200
754 
755 /* target_to_host_errno_table[] is initialized from
756  * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
758 };
759 
760 /*
761  * This list is the union of errno values overridden in asm-<arch>/errno.h
762  * minus the errnos that are not actually generic to all archs.
763  */
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765     [EAGAIN]		= TARGET_EAGAIN,
766     [EIDRM]		= TARGET_EIDRM,
767     [ECHRNG]		= TARGET_ECHRNG,
768     [EL2NSYNC]		= TARGET_EL2NSYNC,
769     [EL3HLT]		= TARGET_EL3HLT,
770     [EL3RST]		= TARGET_EL3RST,
771     [ELNRNG]		= TARGET_ELNRNG,
772     [EUNATCH]		= TARGET_EUNATCH,
773     [ENOCSI]		= TARGET_ENOCSI,
774     [EL2HLT]		= TARGET_EL2HLT,
775     [EDEADLK]		= TARGET_EDEADLK,
776     [ENOLCK]		= TARGET_ENOLCK,
777     [EBADE]		= TARGET_EBADE,
778     [EBADR]		= TARGET_EBADR,
779     [EXFULL]		= TARGET_EXFULL,
780     [ENOANO]		= TARGET_ENOANO,
781     [EBADRQC]		= TARGET_EBADRQC,
782     [EBADSLT]		= TARGET_EBADSLT,
783     [EBFONT]		= TARGET_EBFONT,
784     [ENOSTR]		= TARGET_ENOSTR,
785     [ENODATA]		= TARGET_ENODATA,
786     [ETIME]		= TARGET_ETIME,
787     [ENOSR]		= TARGET_ENOSR,
788     [ENONET]		= TARGET_ENONET,
789     [ENOPKG]		= TARGET_ENOPKG,
790     [EREMOTE]		= TARGET_EREMOTE,
791     [ENOLINK]		= TARGET_ENOLINK,
792     [EADV]		= TARGET_EADV,
793     [ESRMNT]		= TARGET_ESRMNT,
794     [ECOMM]		= TARGET_ECOMM,
795     [EPROTO]		= TARGET_EPROTO,
796     [EDOTDOT]		= TARGET_EDOTDOT,
797     [EMULTIHOP]		= TARGET_EMULTIHOP,
798     [EBADMSG]		= TARGET_EBADMSG,
799     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
800     [EOVERFLOW]		= TARGET_EOVERFLOW,
801     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
802     [EBADFD]		= TARGET_EBADFD,
803     [EREMCHG]		= TARGET_EREMCHG,
804     [ELIBACC]		= TARGET_ELIBACC,
805     [ELIBBAD]		= TARGET_ELIBBAD,
806     [ELIBSCN]		= TARGET_ELIBSCN,
807     [ELIBMAX]		= TARGET_ELIBMAX,
808     [ELIBEXEC]		= TARGET_ELIBEXEC,
809     [EILSEQ]		= TARGET_EILSEQ,
810     [ENOSYS]		= TARGET_ENOSYS,
811     [ELOOP]		= TARGET_ELOOP,
812     [ERESTART]		= TARGET_ERESTART,
813     [ESTRPIPE]		= TARGET_ESTRPIPE,
814     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
815     [EUSERS]		= TARGET_EUSERS,
816     [ENOTSOCK]		= TARGET_ENOTSOCK,
817     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
818     [EMSGSIZE]		= TARGET_EMSGSIZE,
819     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
820     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
821     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
822     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
823     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
824     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
825     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
826     [EADDRINUSE]	= TARGET_EADDRINUSE,
827     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
828     [ENETDOWN]		= TARGET_ENETDOWN,
829     [ENETUNREACH]	= TARGET_ENETUNREACH,
830     [ENETRESET]		= TARGET_ENETRESET,
831     [ECONNABORTED]	= TARGET_ECONNABORTED,
832     [ECONNRESET]	= TARGET_ECONNRESET,
833     [ENOBUFS]		= TARGET_ENOBUFS,
834     [EISCONN]		= TARGET_EISCONN,
835     [ENOTCONN]		= TARGET_ENOTCONN,
836     [EUCLEAN]		= TARGET_EUCLEAN,
837     [ENOTNAM]		= TARGET_ENOTNAM,
838     [ENAVAIL]		= TARGET_ENAVAIL,
839     [EISNAM]		= TARGET_EISNAM,
840     [EREMOTEIO]		= TARGET_EREMOTEIO,
841     [EDQUOT]            = TARGET_EDQUOT,
842     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
843     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
844     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
845     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
846     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
847     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
848     [EALREADY]		= TARGET_EALREADY,
849     [EINPROGRESS]	= TARGET_EINPROGRESS,
850     [ESTALE]		= TARGET_ESTALE,
851     [ECANCELED]		= TARGET_ECANCELED,
852     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
853     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855     [ENOKEY]		= TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873     [ENOMSG]            = TARGET_ENOMSG,
874 #endif
875 #ifdef ERFKILL
876     [ERFKILL]           = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879     [EHWPOISON]         = TARGET_EHWPOISON,
880 #endif
881 };
882 
883 static inline int host_to_target_errno(int err)
884 {
885     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886         host_to_target_errno_table[err]) {
887         return host_to_target_errno_table[err];
888     }
889     return err;
890 }
891 
892 static inline int target_to_host_errno(int err)
893 {
894     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895         target_to_host_errno_table[err]) {
896         return target_to_host_errno_table[err];
897     }
898     return err;
899 }
900 
901 static inline abi_long get_errno(abi_long ret)
902 {
903     if (ret == -1)
904         return -host_to_target_errno(errno);
905     else
906         return ret;
907 }
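/* Usage sketch: host results are funnelled through get_errno() so that a
 * failing call (-1 with errno set) becomes a negative *target* errno, which
 * callers then test with is_error() from the linux-user headers, e.g.
 *
 *     ret = get_errno(close(fd));
 *     if (is_error(ret)) {
 *         return ret;             // e.g. -TARGET_EBADF
 *     }
 */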
908 
909 const char *target_strerror(int err)
910 {
911     if (err == TARGET_ERESTARTSYS) {
912         return "To be restarted";
913     }
914     if (err == TARGET_QEMU_ESIGRETURN) {
915         return "Successful exit from sigreturn";
916     }
917 
918     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919         return NULL;
920     }
921     return strerror(target_to_host_errno(err));
922 }
923 
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
926 { \
927     return safe_syscall(__NR_##name); \
928 }
929 
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
932 { \
933     return safe_syscall(__NR_##name, arg1); \
934 }
935 
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
938 { \
939     return safe_syscall(__NR_##name, arg1, arg2); \
940 }
941 
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
944 { \
945     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
946 }
947 
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
949     type4, arg4) \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
951 { \
952     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
953 }
954 
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956     type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
958     type5 arg5) \
959 { \
960     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
961 }
962 
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964     type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966     type5 arg5, type6 arg6) \
967 { \
968     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
969 }
970 
971 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
972 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
973 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
974               int, flags, mode_t, mode)
975 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
976               struct rusage *, rusage)
977 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
978               int, options, struct rusage *, rusage)
979 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
980 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
981               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
982 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
983               struct timespec *, tsp, const sigset_t *, sigmask,
984               size_t, sigsetsize)
985 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
986               int, maxevents, int, timeout, const sigset_t *, sigmask,
987               size_t, sigsetsize)
988 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
989               const struct timespec *,timeout,int *,uaddr2,int,val3)
990 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
991 safe_syscall2(int, kill, pid_t, pid, int, sig)
992 safe_syscall2(int, tkill, int, tid, int, sig)
993 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
994 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
995 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
996 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
997               unsigned long, pos_l, unsigned long, pos_h)
998 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
999               unsigned long, pos_l, unsigned long, pos_h)
1000 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1001               socklen_t, addrlen)
1002 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1003               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1004 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1005               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1006 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1007 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1008 safe_syscall2(int, flock, int, fd, int, operation)
1009 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1010               const struct timespec *, uts, size_t, sigsetsize)
1011 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1012               int, flags)
1013 safe_syscall2(int, nanosleep, const struct timespec *, req,
1014               struct timespec *, rem)
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1017               const struct timespec *, req, struct timespec *, rem)
1018 #endif
1019 #ifdef __NR_msgsnd
1020 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1021               int, flags)
1022 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1023               long, msgtype, int, flags)
1024 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1025               unsigned, nsops, const struct timespec *, timeout)
1026 #else
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028  * wrappers for the sub-operations to hide this implementation detail.
1029  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030  * for the call parameter because some structs in there conflict with the
1031  * sys/ipc.h ones. So we just define them here, and rely on them being
1032  * the same for all host architectures.
1033  */
1034 #define Q_SEMTIMEDOP 4
1035 #define Q_MSGSND 11
1036 #define Q_MSGRCV 12
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1038 
1039 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1040               void *, ptr, long, fifth)
1041 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1042 {
1043     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1044 }
1045 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1046 {
1047     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1048 }
1049 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1050                            const struct timespec *timeout)
1051 {
1052     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1053                     (long)timeout);
1054 }
1055 #endif
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1058               size_t, len, unsigned, prio, const struct timespec *, timeout)
1059 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1060               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1061 #endif
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063  * "third argument might be integer or pointer or not present" behaviour of
1064  * the libc function.
1065  */
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069  *  use the flock64 struct rather than unsuffixed flock
1070  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1071  */
1072 #ifdef __NR_fcntl64
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1074 #else
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1076 #endif
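/* Usage sketch: blocking host calls go through the safe_*() wrappers (and
 * safe_ioctl()/safe_fcntl() above) so that a guest signal arriving around
 * the call is not lost: the wrapper reports TARGET_ERESTARTSYS and the main
 * loop can restart the syscall once the signal has been delivered, e.g.
 *
 *     ret = get_errno(safe_read(arg1, p, arg3));
 */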
1077 
1078 static inline int host_to_target_sock_type(int host_type)
1079 {
1080     int target_type;
1081 
1082     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083     case SOCK_DGRAM:
1084         target_type = TARGET_SOCK_DGRAM;
1085         break;
1086     case SOCK_STREAM:
1087         target_type = TARGET_SOCK_STREAM;
1088         break;
1089     default:
1090         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091         break;
1092     }
1093 
1094 #if defined(SOCK_CLOEXEC)
1095     if (host_type & SOCK_CLOEXEC) {
1096         target_type |= TARGET_SOCK_CLOEXEC;
1097     }
1098 #endif
1099 
1100 #if defined(SOCK_NONBLOCK)
1101     if (host_type & SOCK_NONBLOCK) {
1102         target_type |= TARGET_SOCK_NONBLOCK;
1103     }
1104 #endif
1105 
1106     return target_type;
1107 }
1108 
1109 static abi_ulong target_brk;
1110 static abi_ulong target_original_brk;
1111 static abi_ulong brk_page;
1112 
1113 void target_set_brk(abi_ulong new_brk)
1114 {
1115     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116     brk_page = HOST_PAGE_ALIGN(target_brk);
1117 }
1118 
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1121 
1122 /* do_brk() must return target values and target errnos. */
1123 abi_long do_brk(abi_ulong new_brk)
1124 {
1125     abi_long mapped_addr;
1126     abi_ulong new_alloc_size;
1127 
1128     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1129 
1130     if (!new_brk) {
1131         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1132         return target_brk;
1133     }
1134     if (new_brk < target_original_brk) {
1135         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1136                    target_brk);
1137         return target_brk;
1138     }
1139 
1140     /* If the new brk is less than the highest page reserved to the
1141      * target heap allocation, set it and we're almost done...  */
1142     if (new_brk <= brk_page) {
1143         /* Heap contents are initialized to zero, as for anonymous
1144          * mapped pages.  */
1145         if (new_brk > target_brk) {
1146             memset(g2h(target_brk), 0, new_brk - target_brk);
1147         }
1148         target_brk = new_brk;
1149         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1150         return target_brk;
1151     }
1152 
1153     /* We need to allocate more memory after the brk... Note that
1154      * we don't use MAP_FIXED because that will map over the top of
1155      * any existing mapping (like the one with the host libc or qemu
1156      * itself); instead we treat "mapped but at wrong address" as
1157      * a failure and unmap again.
1158      */
1159     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1160     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1161                                         PROT_READ|PROT_WRITE,
1162                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1163 
1164     if (mapped_addr == brk_page) {
1165         /* Heap contents are initialized to zero, as for anonymous
1166          * mapped pages.  Technically the new pages are already
1167          * initialized to zero since they *are* anonymous mapped
1168          * pages, however we have to take care with the contents that
1169          * pages; however, we have to take care with the contents that
1170          * come from the remaining part of the previous page: it may
1171          * contain garbage data due to a previous heap usage (grown
1172         memset(g2h(target_brk), 0, brk_page - target_brk);
1173 
1174         target_brk = new_brk;
1175         brk_page = HOST_PAGE_ALIGN(target_brk);
1176         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1177             target_brk);
1178         return target_brk;
1179     } else if (mapped_addr != -1) {
1180         /* Mapped but at wrong address, meaning there wasn't actually
1181          * enough space for this brk.
1182          */
1183         target_munmap(mapped_addr, new_alloc_size);
1184         mapped_addr = -1;
1185         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1186     }
1187     else {
1188         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1189     }
1190 
1191 #if defined(TARGET_ALPHA)
1192     /* We (partially) emulate OSF/1 on Alpha, which requires we
1193        return a proper errno, not an unchanged brk value.  */
1194     return -TARGET_ENOMEM;
1195 #endif
1196     /* For everything else, return the previous break. */
1197     return target_brk;
1198 }
1199 
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201                                             abi_ulong target_fds_addr,
1202                                             int n)
1203 {
1204     int i, nw, j, k;
1205     abi_ulong b, *target_fds;
1206 
1207     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208     if (!(target_fds = lock_user(VERIFY_READ,
1209                                  target_fds_addr,
1210                                  sizeof(abi_ulong) * nw,
1211                                  1)))
1212         return -TARGET_EFAULT;
1213 
1214     FD_ZERO(fds);
1215     k = 0;
1216     for (i = 0; i < nw; i++) {
1217         /* grab the abi_ulong */
1218         __get_user(b, &target_fds[i]);
1219         for (j = 0; j < TARGET_ABI_BITS; j++) {
1220             /* check the bit inside the abi_ulong */
1221             if ((b >> j) & 1)
1222                 FD_SET(k, fds);
1223             k++;
1224         }
1225     }
1226 
1227     unlock_user(target_fds, target_fds_addr, 0);
1228 
1229     return 0;
1230 }
1231 
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233                                                  abi_ulong target_fds_addr,
1234                                                  int n)
1235 {
1236     if (target_fds_addr) {
1237         if (copy_from_user_fdset(fds, target_fds_addr, n))
1238             return -TARGET_EFAULT;
1239         *fds_ptr = fds;
1240     } else {
1241         *fds_ptr = NULL;
1242     }
1243     return 0;
1244 }
1245 
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247                                           const fd_set *fds,
1248                                           int n)
1249 {
1250     int i, nw, j, k;
1251     abi_long v;
1252     abi_ulong *target_fds;
1253 
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_WRITE,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  0)))
1259         return -TARGET_EFAULT;
1260 
1261     k = 0;
1262     for (i = 0; i < nw; i++) {
1263         v = 0;
1264         for (j = 0; j < TARGET_ABI_BITS; j++) {
1265             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266             k++;
1267         }
1268         __put_user(v, &target_fds[i]);
1269     }
1270 
1271     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1272 
1273     return 0;
1274 }
1275 
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1278 #else
1279 #define HOST_HZ 100
1280 #endif
1281 
1282 static inline abi_long host_to_target_clock_t(long ticks)
1283 {
1284 #if HOST_HZ == TARGET_HZ
1285     return ticks;
1286 #else
1287     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1288 #endif
1289 }
1290 
1291 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1292                                              const struct rusage *rusage)
1293 {
1294     struct target_rusage *target_rusage;
1295 
1296     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1297         return -TARGET_EFAULT;
1298     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1299     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1300     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1301     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1302     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1303     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1304     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1305     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1306     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1307     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1308     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1309     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1310     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1311     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1312     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1313     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1314     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1315     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1316     unlock_user_struct(target_rusage, target_addr, 1);
1317 
1318     return 0;
1319 }
1320 
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1322 {
1323     abi_ulong target_rlim_swap;
1324     rlim_t result;
1325 
1326     target_rlim_swap = tswapal(target_rlim);
1327     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328         return RLIM_INFINITY;
1329 
1330     result = target_rlim_swap;
1331     if (target_rlim_swap != (rlim_t)result)
1332         return RLIM_INFINITY;
1333 
1334     return result;
1335 }
1336 
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1338 {
1339     abi_ulong target_rlim_swap;
1340     abi_ulong result;
1341 
1342     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343         target_rlim_swap = TARGET_RLIM_INFINITY;
1344     else
1345         target_rlim_swap = rlim;
1346     result = tswapal(target_rlim_swap);
1347 
1348     return result;
1349 }
1350 
1351 static inline int target_to_host_resource(int code)
1352 {
1353     switch (code) {
1354     case TARGET_RLIMIT_AS:
1355         return RLIMIT_AS;
1356     case TARGET_RLIMIT_CORE:
1357         return RLIMIT_CORE;
1358     case TARGET_RLIMIT_CPU:
1359         return RLIMIT_CPU;
1360     case TARGET_RLIMIT_DATA:
1361         return RLIMIT_DATA;
1362     case TARGET_RLIMIT_FSIZE:
1363         return RLIMIT_FSIZE;
1364     case TARGET_RLIMIT_LOCKS:
1365         return RLIMIT_LOCKS;
1366     case TARGET_RLIMIT_MEMLOCK:
1367         return RLIMIT_MEMLOCK;
1368     case TARGET_RLIMIT_MSGQUEUE:
1369         return RLIMIT_MSGQUEUE;
1370     case TARGET_RLIMIT_NICE:
1371         return RLIMIT_NICE;
1372     case TARGET_RLIMIT_NOFILE:
1373         return RLIMIT_NOFILE;
1374     case TARGET_RLIMIT_NPROC:
1375         return RLIMIT_NPROC;
1376     case TARGET_RLIMIT_RSS:
1377         return RLIMIT_RSS;
1378     case TARGET_RLIMIT_RTPRIO:
1379         return RLIMIT_RTPRIO;
1380     case TARGET_RLIMIT_SIGPENDING:
1381         return RLIMIT_SIGPENDING;
1382     case TARGET_RLIMIT_STACK:
1383         return RLIMIT_STACK;
1384     default:
1385         return code;
1386     }
1387 }
1388 
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390                                               abi_ulong target_tv_addr)
1391 {
1392     struct target_timeval *target_tv;
1393 
1394     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395         return -TARGET_EFAULT;
1396 
1397     __get_user(tv->tv_sec, &target_tv->tv_sec);
1398     __get_user(tv->tv_usec, &target_tv->tv_usec);
1399 
1400     unlock_user_struct(target_tv, target_tv_addr, 0);
1401 
1402     return 0;
1403 }
1404 
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406                                             const struct timeval *tv)
1407 {
1408     struct target_timeval *target_tv;
1409 
1410     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411         return -TARGET_EFAULT;
1412 
1413     __put_user(tv->tv_sec, &target_tv->tv_sec);
1414     __put_user(tv->tv_usec, &target_tv->tv_usec);
1415 
1416     unlock_user_struct(target_tv, target_tv_addr, 1);
1417 
1418     return 0;
1419 }
1420 
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422                                                abi_ulong target_tz_addr)
1423 {
1424     struct target_timezone *target_tz;
1425 
1426     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427         return -TARGET_EFAULT;
1428     }
1429 
1430     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1432 
1433     unlock_user_struct(target_tz, target_tz_addr, 0);
1434 
1435     return 0;
1436 }
1437 
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
1440 
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442                                               abi_ulong target_mq_attr_addr)
1443 {
1444     struct target_mq_attr *target_mq_attr;
1445 
1446     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447                           target_mq_attr_addr, 1))
1448         return -TARGET_EFAULT;
1449 
1450     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1454 
1455     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1456 
1457     return 0;
1458 }
1459 
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461                                             const struct mq_attr *attr)
1462 {
1463     struct target_mq_attr *target_mq_attr;
1464 
1465     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466                           target_mq_attr_addr, 0))
1467         return -TARGET_EFAULT;
1468 
1469     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1473 
1474     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1475 
1476     return 0;
1477 }
1478 #endif
1479 
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
1482 static abi_long do_select(int n,
1483                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1484                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1485 {
1486     fd_set rfds, wfds, efds;
1487     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1488     struct timeval tv;
1489     struct timespec ts, *ts_ptr;
1490     abi_long ret;
1491 
1492     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493     if (ret) {
1494         return ret;
1495     }
1496     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497     if (ret) {
1498         return ret;
1499     }
1500     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501     if (ret) {
1502         return ret;
1503     }
1504 
1505     if (target_tv_addr) {
1506         if (copy_from_user_timeval(&tv, target_tv_addr))
1507             return -TARGET_EFAULT;
1508         ts.tv_sec = tv.tv_sec;
1509         ts.tv_nsec = tv.tv_usec * 1000;
1510         ts_ptr = &ts;
1511     } else {
1512         ts_ptr = NULL;
1513     }
1514 
1515     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1516                                   ts_ptr, NULL));
1517 
1518     if (!is_error(ret)) {
1519         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1520             return -TARGET_EFAULT;
1521         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1522             return -TARGET_EFAULT;
1523         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1524             return -TARGET_EFAULT;
1525 
1526         if (target_tv_addr) {
1527             tv.tv_sec = ts.tv_sec;
1528             tv.tv_usec = ts.tv_nsec / 1000;
1529             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1530                 return -TARGET_EFAULT;
1531             }
1532         }
1533     }
1534 
1535     return ret;
1536 }
1537 
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1540 {
1541     struct target_sel_arg_struct *sel;
1542     abi_ulong inp, outp, exp, tvp;
1543     long nsel;
1544 
1545     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546         return -TARGET_EFAULT;
1547     }
1548 
1549     nsel = tswapal(sel->n);
1550     inp = tswapal(sel->inp);
1551     outp = tswapal(sel->outp);
1552     exp = tswapal(sel->exp);
1553     tvp = tswapal(sel->tvp);
1554 
1555     unlock_user_struct(sel, arg1, 0);
1556 
1557     return do_select(nsel, inp, outp, exp, tvp);
1558 }
1559 #endif
1560 #endif
1561 
1562 static abi_long do_pipe2(int host_pipe[], int flags)
1563 {
1564 #ifdef CONFIG_PIPE2
1565     return pipe2(host_pipe, flags);
1566 #else
1567     return -ENOSYS;
1568 #endif
1569 }
1570 
1571 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1572                         int flags, int is_pipe2)
1573 {
1574     int host_pipe[2];
1575     abi_long ret;
1576     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1577 
1578     if (is_error(ret))
1579         return get_errno(ret);
1580 
1581     /* Several targets have special calling conventions for the original
1582        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1583     if (!is_pipe2) {
1584 #if defined(TARGET_ALPHA)
1585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1586         return host_pipe[0];
1587 #elif defined(TARGET_MIPS)
1588         ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1589         return host_pipe[0];
1590 #elif defined(TARGET_SH4)
1591         ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1592         return host_pipe[0];
1593 #elif defined(TARGET_SPARC)
1594         ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1595         return host_pipe[0];
1596 #endif
1597     }
1598 
1599     if (put_user_s32(host_pipe[0], pipedes)
1600         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1601         return -TARGET_EFAULT;
1602     return get_errno(ret);
1603 }
1604 
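     /* Convert a guest ip_mreq/ip_mreqn at target_addr into the host
      * struct ip_mreqn.  The interface index is only read (and swapped)
      * when the guest passed a full ip_mreqn.
      */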
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606                                               abi_ulong target_addr,
1607                                               socklen_t len)
1608 {
1609     struct target_ip_mreqn *target_smreqn;
1610 
1611     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612     if (!target_smreqn)
1613         return -TARGET_EFAULT;
1614     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616     if (len == sizeof(struct target_ip_mreqn))
1617         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618     unlock_user(target_smreqn, target_addr, 0);
1619 
1620     return 0;
1621 }
1622 
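     /* Copy a guest sockaddr into host format: swap the family and any
      * endian-sensitive per-family fields (netlink pid/groups, packet
      * ifindex/hatype) and fix up short AF_UNIX lengths as described in
      * the comment below.
      */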
1623 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1624                                                abi_ulong target_addr,
1625                                                socklen_t len)
1626 {
1627     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1628     sa_family_t sa_family;
1629     struct target_sockaddr *target_saddr;
1630 
1631     if (fd_trans_target_to_host_addr(fd)) {
1632         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1633     }
1634 
1635     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_saddr)
1637         return -TARGET_EFAULT;
1638 
1639     sa_family = tswap16(target_saddr->sa_family);
1640 
1641     /* Oops. The caller might send an incomplete sun_path; sun_path
1642      * must be terminated by \0 (see the manual page), but
1643      * unfortunately it is quite common to specify the sockaddr_un
1644      * length as "strlen(x->sun_path)" when it should be
1645      * "strlen(...) + 1". We fix that up here if needed.
1646      * The Linux kernel has a similar workaround.
1647      */
1648 
1649     if (sa_family == AF_UNIX) {
1650         if (len < unix_maxlen && len > 0) {
1651             char *cp = (char *)target_saddr;
1652 
1653             if (cp[len - 1] && !cp[len])
1654                 len++;
1655         }
1656         if (len > unix_maxlen)
1657             len = unix_maxlen;
1658     }
1659 
1660     memcpy(addr, target_saddr, len);
1661     addr->sa_family = sa_family;
1662     if (sa_family == AF_NETLINK) {
1663         struct sockaddr_nl *nladdr;
1664 
1665         nladdr = (struct sockaddr_nl *)addr;
1666         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1667         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1668     } else if (sa_family == AF_PACKET) {
1669         struct target_sockaddr_ll *lladdr;
1670 
1671         lladdr = (struct target_sockaddr_ll *)addr;
1672         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1673         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1674     }
1675     unlock_user(target_saddr, target_addr, 0);
1676 
1677     return 0;
1678 }
1679 
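     /* Copy a host sockaddr back out to the guest, byte-swapping the
      * family and the netlink, packet and IPv6 fields that need it.
      * A zero length is a no-op.
      */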
1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681                                                struct sockaddr *addr,
1682                                                socklen_t len)
1683 {
1684     struct target_sockaddr *target_saddr;
1685 
1686     if (len == 0) {
1687         return 0;
1688     }
1689     assert(addr);
1690 
1691     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692     if (!target_saddr)
1693         return -TARGET_EFAULT;
1694     memcpy(target_saddr, addr, len);
1695     if (len >= offsetof(struct target_sockaddr, sa_family) +
1696         sizeof(target_saddr->sa_family)) {
1697         target_saddr->sa_family = tswap16(addr->sa_family);
1698     }
1699     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1700         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1701         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1702         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1703     } else if (addr->sa_family == AF_PACKET) {
1704         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1705         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1706         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1707     } else if (addr->sa_family == AF_INET6 &&
1708                len >= sizeof(struct target_sockaddr_in6)) {
1709         struct target_sockaddr_in6 *target_in6 =
1710                (struct target_sockaddr_in6 *)target_saddr;
1711         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1712     }
1713     unlock_user(target_saddr, target_addr, len);
1714 
1715     return 0;
1716 }
1717 
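     /* Convert the ancillary data (control messages) attached to a guest
      * msghdr into host format, walking the guest and host cmsg chains in
      * parallel.  SCM_RIGHTS and SCM_CREDENTIALS payloads are converted
      * field by field; anything else is copied verbatim with a warning.
      */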
1718 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1719                                            struct target_msghdr *target_msgh)
1720 {
1721     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1722     abi_long msg_controllen;
1723     abi_ulong target_cmsg_addr;
1724     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1725     socklen_t space = 0;
1726 
1727     msg_controllen = tswapal(target_msgh->msg_controllen);
1728     if (msg_controllen < sizeof (struct target_cmsghdr))
1729         goto the_end;
1730     target_cmsg_addr = tswapal(target_msgh->msg_control);
1731     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1732     target_cmsg_start = target_cmsg;
1733     if (!target_cmsg)
1734         return -TARGET_EFAULT;
1735 
1736     while (cmsg && target_cmsg) {
1737         void *data = CMSG_DATA(cmsg);
1738         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1739 
1740         int len = tswapal(target_cmsg->cmsg_len)
1741             - sizeof(struct target_cmsghdr);
1742 
1743         space += CMSG_SPACE(len);
1744         if (space > msgh->msg_controllen) {
1745             space -= CMSG_SPACE(len);
1746             /* This is a QEMU bug, since we allocated the payload
1747              * area ourselves (unlike overflow in host-to-target
1748              * conversion, which is just the guest giving us a buffer
1749              * that's too small). It can't happen for the payload types
1750              * we currently support; if it becomes an issue in future
1751              * we would need to improve our allocation strategy to
1752              * something more intelligent than "twice the size of the
1753              * target buffer we're reading from".
1754              */
1755             gemu_log("Host cmsg overflow\n");
1756             break;
1757         }
1758 
1759         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1760             cmsg->cmsg_level = SOL_SOCKET;
1761         } else {
1762             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1763         }
1764         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1765         cmsg->cmsg_len = CMSG_LEN(len);
1766 
1767         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1768             int *fd = (int *)data;
1769             int *target_fd = (int *)target_data;
1770             int i, numfds = len / sizeof(int);
1771 
1772             for (i = 0; i < numfds; i++) {
1773                 __get_user(fd[i], target_fd + i);
1774             }
1775         } else if (cmsg->cmsg_level == SOL_SOCKET
1776                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1777             struct ucred *cred = (struct ucred *)data;
1778             struct target_ucred *target_cred =
1779                 (struct target_ucred *)target_data;
1780 
1781             __get_user(cred->pid, &target_cred->pid);
1782             __get_user(cred->uid, &target_cred->uid);
1783             __get_user(cred->gid, &target_cred->gid);
1784         } else {
1785             gemu_log("Unsupported ancillary data: %d/%d\n",
1786                                         cmsg->cmsg_level, cmsg->cmsg_type);
1787             memcpy(data, target_data, len);
1788         }
1789 
1790         cmsg = CMSG_NXTHDR(msgh, cmsg);
1791         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1792                                          target_cmsg_start);
1793     }
1794     unlock_user(target_cmsg, target_cmsg_addr, 0);
1795  the_end:
1796     msgh->msg_controllen = space;
1797     return 0;
1798 }
1799 
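     /* The reverse of target_to_host_cmsg(): convert host control
      * messages back into the guest msghdr, truncating the data (and
      * setting MSG_CTRUNC) when the guest buffer is too small.
      */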
1800 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1801                                            struct msghdr *msgh)
1802 {
1803     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1804     abi_long msg_controllen;
1805     abi_ulong target_cmsg_addr;
1806     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1807     socklen_t space = 0;
1808 
1809     msg_controllen = tswapal(target_msgh->msg_controllen);
1810     if (msg_controllen < sizeof (struct target_cmsghdr))
1811         goto the_end;
1812     target_cmsg_addr = tswapal(target_msgh->msg_control);
1813     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1814     target_cmsg_start = target_cmsg;
1815     if (!target_cmsg)
1816         return -TARGET_EFAULT;
1817 
1818     while (cmsg && target_cmsg) {
1819         void *data = CMSG_DATA(cmsg);
1820         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1821 
1822         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1823         int tgt_len, tgt_space;
1824 
1825         /* We never copy a half-header but may copy half-data;
1826          * this is Linux's behaviour in put_cmsg(). Note that
1827          * truncation here is a guest problem (which we report
1828          * to the guest via the CTRUNC bit), unlike truncation
1829          * in target_to_host_cmsg, which is a QEMU bug.
1830          */
1831         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1832             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1833             break;
1834         }
1835 
1836         if (cmsg->cmsg_level == SOL_SOCKET) {
1837             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1838         } else {
1839             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1840         }
1841         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1842 
1843         /* Payload types which need a different size of payload on
1844          * the target must adjust tgt_len here.
1845          */
1846         tgt_len = len;
1847         switch (cmsg->cmsg_level) {
1848         case SOL_SOCKET:
1849             switch (cmsg->cmsg_type) {
1850             case SO_TIMESTAMP:
1851                 tgt_len = sizeof(struct target_timeval);
1852                 break;
1853             default:
1854                 break;
1855             }
1856             break;
1857         default:
1858             break;
1859         }
1860 
1861         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1864         }
1865 
1866         /* We must now copy-and-convert len bytes of payload
1867          * into tgt_len bytes of destination space. Bear in mind
1868          * that in both source and destination we may be dealing
1869          * with a truncated value!
1870          */
1871         switch (cmsg->cmsg_level) {
1872         case SOL_SOCKET:
1873             switch (cmsg->cmsg_type) {
1874             case SCM_RIGHTS:
1875             {
1876                 int *fd = (int *)data;
1877                 int *target_fd = (int *)target_data;
1878                 int i, numfds = tgt_len / sizeof(int);
1879 
1880                 for (i = 0; i < numfds; i++) {
1881                     __put_user(fd[i], target_fd + i);
1882                 }
1883                 break;
1884             }
1885             case SO_TIMESTAMP:
1886             {
1887                 struct timeval *tv = (struct timeval *)data;
1888                 struct target_timeval *target_tv =
1889                     (struct target_timeval *)target_data;
1890 
1891                 if (len != sizeof(struct timeval) ||
1892                     tgt_len != sizeof(struct target_timeval)) {
1893                     goto unimplemented;
1894                 }
1895 
1896                 /* copy struct timeval to target */
1897                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1898                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1899                 break;
1900             }
1901             case SCM_CREDENTIALS:
1902             {
1903                 struct ucred *cred = (struct ucred *)data;
1904                 struct target_ucred *target_cred =
1905                     (struct target_ucred *)target_data;
1906 
1907                 __put_user(cred->pid, &target_cred->pid);
1908                 __put_user(cred->uid, &target_cred->uid);
1909                 __put_user(cred->gid, &target_cred->gid);
1910                 break;
1911             }
1912             default:
1913                 goto unimplemented;
1914             }
1915             break;
1916 
1917         case SOL_IP:
1918             switch (cmsg->cmsg_type) {
1919             case IP_TTL:
1920             {
1921                 uint32_t *v = (uint32_t *)data;
1922                 uint32_t *t_int = (uint32_t *)target_data;
1923 
1924                 if (len != sizeof(uint32_t) ||
1925                     tgt_len != sizeof(uint32_t)) {
1926                     goto unimplemented;
1927                 }
1928                 __put_user(*v, t_int);
1929                 break;
1930             }
1931             case IP_RECVERR:
1932             {
1933                 struct errhdr_t {
1934                    struct sock_extended_err ee;
1935                    struct sockaddr_in offender;
1936                 };
1937                 struct errhdr_t *errh = (struct errhdr_t *)data;
1938                 struct errhdr_t *target_errh =
1939                     (struct errhdr_t *)target_data;
1940 
1941                 if (len != sizeof(struct errhdr_t) ||
1942                     tgt_len != sizeof(struct errhdr_t)) {
1943                     goto unimplemented;
1944                 }
1945                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1946                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1947                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1948                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1949                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1950                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1951                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1952                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1953                     (void *) &errh->offender, sizeof(errh->offender));
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IPV6:
1962             switch (cmsg->cmsg_type) {
1963             case IPV6_HOPLIMIT:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IPV6_RECVERR:
1976             {
1977                 struct errhdr6_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in6 offender;
1980                 };
1981                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1982                 struct errhdr6_t *target_errh =
1983                     (struct errhdr6_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr6_t) ||
1986                     tgt_len != sizeof(struct errhdr6_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         default:
2006         unimplemented:
2007             gemu_log("Unsupported ancillary data: %d/%d\n",
2008                                         cmsg->cmsg_level, cmsg->cmsg_type);
2009             memcpy(target_data, data, MIN(len, tgt_len));
2010             if (tgt_len > len) {
2011                 memset(target_data + len, 0, tgt_len - len);
2012             }
2013         }
2014 
2015         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2016         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2017         if (msg_controllen < tgt_space) {
2018             tgt_space = msg_controllen;
2019         }
2020         msg_controllen -= tgt_space;
2021         space += tgt_space;
2022         cmsg = CMSG_NXTHDR(msgh, cmsg);
2023         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2024                                          target_cmsg_start);
2025     }
2026     unlock_user(target_cmsg, target_cmsg_addr, space);
2027  the_end:
2028     target_msgh->msg_controllen = tswapal(space);
2029     return 0;
2030 }
2031 
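     /* Byte-swap the fixed fields of a netlink message header in place. */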
2032 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2033 {
2034     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2035     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2036     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2037     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2038     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2039 }
2040 
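     /* Walk a buffer of netlink messages coming from the host, swapping
      * each header and passing the payload of any non-trivial message to
      * the supplied conversion callback.
      */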
2041 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2042                                               size_t len,
2043                                               abi_long (*host_to_target_nlmsg)
2044                                                        (struct nlmsghdr *))
2045 {
2046     uint32_t nlmsg_len;
2047     abi_long ret;
2048 
2049     while (len > sizeof(struct nlmsghdr)) {
2050 
2051         nlmsg_len = nlh->nlmsg_len;
2052         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2053             nlmsg_len > len) {
2054             break;
2055         }
2056 
2057         switch (nlh->nlmsg_type) {
2058         case NLMSG_DONE:
2059             tswap_nlmsghdr(nlh);
2060             return 0;
2061         case NLMSG_NOOP:
2062             break;
2063         case NLMSG_ERROR:
2064         {
2065             struct nlmsgerr *e = NLMSG_DATA(nlh);
2066             e->error = tswap32(e->error);
2067             tswap_nlmsghdr(&e->msg);
2068             tswap_nlmsghdr(nlh);
2069             return 0;
2070         }
2071         default:
2072             ret = host_to_target_nlmsg(nlh);
2073             if (ret < 0) {
2074                 tswap_nlmsghdr(nlh);
2075                 return ret;
2076             }
2077             break;
2078         }
2079         tswap_nlmsghdr(nlh);
2080         len -= NLMSG_ALIGN(nlmsg_len);
2081         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlmsg_len));
2082     }
2083     return 0;
2084 }
2085 
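     /* As above, but for netlink messages sent by the guest: each header
      * is swapped to host order before the payload callback runs.
      */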
2086 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2087                                               size_t len,
2088                                               abi_long (*target_to_host_nlmsg)
2089                                                        (struct nlmsghdr *))
2090 {
2091     int ret;
2092 
2093     while (len > sizeof(struct nlmsghdr)) {
2094         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2095             tswap32(nlh->nlmsg_len) > len) {
2096             break;
2097         }
2098         tswap_nlmsghdr(nlh);
2099         switch (nlh->nlmsg_type) {
2100         case NLMSG_DONE:
2101             return 0;
2102         case NLMSG_NOOP:
2103             break;
2104         case NLMSG_ERROR:
2105         {
2106             struct nlmsgerr *e = NLMSG_DATA(nlh);
2107             e->error = tswap32(e->error);
2108             tswap_nlmsghdr(&e->msg);
2109             return 0;
2110         }
2111         default:
2112             ret = target_to_host_nlmsg(nlh);
2113             if (ret < 0) {
2114                 return ret;
2115             }
2116         }
2117         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2118         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2119     }
2120     return 0;
2121 }
2122 
2123 #ifdef CONFIG_RTNETLINK
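     /* Iterate over a run of netlink attributes from the host, letting
      * the callback convert each payload before the attribute header
      * itself is byte-swapped.
      */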
2124 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2125                                                size_t len, void *context,
2126                                                abi_long (*host_to_target_nlattr)
2127                                                         (struct nlattr *,
2128                                                          void *context))
2129 {
2130     unsigned short nla_len;
2131     abi_long ret;
2132 
2133     while (len > sizeof(struct nlattr)) {
2134         nla_len = nlattr->nla_len;
2135         if (nla_len < sizeof(struct nlattr) ||
2136             nla_len > len) {
2137             break;
2138         }
2139         ret = host_to_target_nlattr(nlattr, context);
2140         nlattr->nla_len = tswap16(nlattr->nla_len);
2141         nlattr->nla_type = tswap16(nlattr->nla_type);
2142         if (ret < 0) {
2143             return ret;
2144         }
2145         len -= NLA_ALIGN(nla_len);
2146         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2147     }
2148     return 0;
2149 }
2150 
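     /* The same walk for rtnetlink attributes (struct rtattr). */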
2151 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2152                                                size_t len,
2153                                                abi_long (*host_to_target_rtattr)
2154                                                         (struct rtattr *))
2155 {
2156     unsigned short rta_len;
2157     abi_long ret;
2158 
2159     while (len > sizeof(struct rtattr)) {
2160         rta_len = rtattr->rta_len;
2161         if (rta_len < sizeof(struct rtattr) ||
2162             rta_len > len) {
2163             break;
2164         }
2165         ret = host_to_target_rtattr(rtattr);
2166         rtattr->rta_len = tswap16(rtattr->rta_len);
2167         rtattr->rta_type = tswap16(rtattr->rta_type);
2168         if (ret < 0) {
2169             return ret;
2170         }
2171         len -= RTA_ALIGN(rta_len);
2172         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2173     }
2174     return 0;
2175 }
2176 
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2178 
2179 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2180                                                   void *context)
2181 {
2182     uint16_t *u16;
2183     uint32_t *u32;
2184     uint64_t *u64;
2185 
2186     switch (nlattr->nla_type) {
2187     /* no data */
2188     case QEMU_IFLA_BR_FDB_FLUSH:
2189         break;
2190     /* binary */
2191     case QEMU_IFLA_BR_GROUP_ADDR:
2192         break;
2193     /* uint8_t */
2194     case QEMU_IFLA_BR_VLAN_FILTERING:
2195     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2196     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2197     case QEMU_IFLA_BR_MCAST_ROUTER:
2198     case QEMU_IFLA_BR_MCAST_SNOOPING:
2199     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2200     case QEMU_IFLA_BR_MCAST_QUERIER:
2201     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2202     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2203     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2204     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2205     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2206     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2207     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2208         break;
2209     /* uint16_t */
2210     case QEMU_IFLA_BR_PRIORITY:
2211     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2212     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2213     case QEMU_IFLA_BR_ROOT_PORT:
2214     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2215         u16 = NLA_DATA(nlattr);
2216         *u16 = tswap16(*u16);
2217         break;
2218     /* uint32_t */
2219     case QEMU_IFLA_BR_FORWARD_DELAY:
2220     case QEMU_IFLA_BR_HELLO_TIME:
2221     case QEMU_IFLA_BR_MAX_AGE:
2222     case QEMU_IFLA_BR_AGEING_TIME:
2223     case QEMU_IFLA_BR_STP_STATE:
2224     case QEMU_IFLA_BR_ROOT_PATH_COST:
2225     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2226     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2227     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2228     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2229         u32 = NLA_DATA(nlattr);
2230         *u32 = tswap32(*u32);
2231         break;
2232     /* uint64_t */
2233     case QEMU_IFLA_BR_HELLO_TIMER:
2234     case QEMU_IFLA_BR_TCN_TIMER:
2235     case QEMU_IFLA_BR_GC_TIMER:
2236     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2237     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2238     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2239     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2240     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2241     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2242     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2243         u64 = NLA_DATA(nlattr);
2244         *u64 = tswap64(*u64);
2245         break;
2246     /* ifla_bridge_id: uint8_t[] */
2247     case QEMU_IFLA_BR_ROOT_ID:
2248     case QEMU_IFLA_BR_BRIDGE_ID:
2249         break;
2250     default:
2251         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2252         break;
2253     }
2254     return 0;
2255 }
2256 
2257 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2258                                                         void *context)
2259 {
2260     uint16_t *u16;
2261     uint32_t *u32;
2262     uint64_t *u64;
2263 
2264     switch (nlattr->nla_type) {
2265     /* uint8_t */
2266     case QEMU_IFLA_BRPORT_STATE:
2267     case QEMU_IFLA_BRPORT_MODE:
2268     case QEMU_IFLA_BRPORT_GUARD:
2269     case QEMU_IFLA_BRPORT_PROTECT:
2270     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2271     case QEMU_IFLA_BRPORT_LEARNING:
2272     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2273     case QEMU_IFLA_BRPORT_PROXYARP:
2274     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2275     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2276     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2277     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2278     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2279     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2280     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2281     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2282     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2283     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2284         break;
2285     /* uint16_t */
2286     case QEMU_IFLA_BRPORT_PRIORITY:
2287     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2288     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2289     case QEMU_IFLA_BRPORT_ID:
2290     case QEMU_IFLA_BRPORT_NO:
2291     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2292         u16 = NLA_DATA(nlattr);
2293         *u16 = tswap16(*u16);
2294         break;
2295     /* uint32_t */
2296     case QEMU_IFLA_BRPORT_COST:
2297         u32 = NLA_DATA(nlattr);
2298         *u32 = tswap32(*u32);
2299         break;
2300     /* uint64_t */
2301     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2302     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2303     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2304         u64 = NLA_DATA(nlattr);
2305         *u64 = tswap64(*u64);
2306         break;
2307     /* ifla_bridge_id: uint8_t[] */
2308     case QEMU_IFLA_BRPORT_ROOT_ID:
2309     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2310         break;
2311     default:
2312         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2313         break;
2314     }
2315     return 0;
2316 }
2317 
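     /* QEMU_IFLA_INFO_DATA and QEMU_IFLA_INFO_SLAVE_DATA can only be
      * interpreted once the corresponding *_KIND string is known, so the
      * kind names are remembered here while walking QEMU_IFLA_LINKINFO.
      */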
2318 struct linkinfo_context {
2319     int len;
2320     char *name;
2321     int slave_len;
2322     char *slave_name;
2323 };
2324 
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326                                                     void *context)
2327 {
2328     struct linkinfo_context *li_context = context;
2329 
2330     switch (nlattr->nla_type) {
2331     /* string */
2332     case QEMU_IFLA_INFO_KIND:
2333         li_context->name = NLA_DATA(nlattr);
2334         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335         break;
2336     case QEMU_IFLA_INFO_SLAVE_KIND:
2337         li_context->slave_name = NLA_DATA(nlattr);
2338         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339         break;
2340     /* stats */
2341     case QEMU_IFLA_INFO_XSTATS:
2342         /* FIXME: only used by CAN */
2343         break;
2344     /* nested */
2345     case QEMU_IFLA_INFO_DATA:
2346         if (strncmp(li_context->name, "bridge",
2347                     li_context->len) == 0) {
2348             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349                                                   nlattr->nla_len,
2350                                                   NULL,
2351                                              host_to_target_data_bridge_nlattr);
2352         } else {
2353             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2354         }
2355         break;
2356     case QEMU_IFLA_INFO_SLAVE_DATA:
2357         if (strncmp(li_context->slave_name, "bridge",
2358                     li_context->slave_len) == 0) {
2359             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360                                                   nlattr->nla_len,
2361                                                   NULL,
2362                                        host_to_target_slave_data_bridge_nlattr);
2363         } else {
2364             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365                      li_context->slave_name);
2366         }
2367         break;
2368     default:
2369         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370         break;
2371     }
2372 
2373     return 0;
2374 }
2375 
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377                                                 void *context)
2378 {
2379     uint32_t *u32;
2380     int i;
2381 
2382     switch (nlattr->nla_type) {
2383     case QEMU_IFLA_INET_CONF:
2384         u32 = NLA_DATA(nlattr);
2385         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386              i++) {
2387             u32[i] = tswap32(u32[i]);
2388         }
2389         break;
2390     default:
2391         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2392     }
2393     return 0;
2394 }
2395 
2396 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2397                                                 void *context)
2398 {
2399     uint32_t *u32;
2400     uint64_t *u64;
2401     struct ifla_cacheinfo *ci;
2402     int i;
2403 
2404     switch (nlattr->nla_type) {
2405     /* binaries */
2406     case QEMU_IFLA_INET6_TOKEN:
2407         break;
2408     /* uint8_t */
2409     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2410         break;
2411     /* uint32_t */
2412     case QEMU_IFLA_INET6_FLAGS:
2413         u32 = NLA_DATA(nlattr);
2414         *u32 = tswap32(*u32);
2415         break;
2416     /* uint32_t[] */
2417     case QEMU_IFLA_INET6_CONF:
2418         u32 = NLA_DATA(nlattr);
2419         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2420              i++) {
2421             u32[i] = tswap32(u32[i]);
2422         }
2423         break;
2424     /* ifla_cacheinfo */
2425     case QEMU_IFLA_INET6_CACHEINFO:
2426         ci = NLA_DATA(nlattr);
2427         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2428         ci->tstamp = tswap32(ci->tstamp);
2429         ci->reachable_time = tswap32(ci->reachable_time);
2430         ci->retrans_time = tswap32(ci->retrans_time);
2431         break;
2432     /* uint64_t[] */
2433     case QEMU_IFLA_INET6_STATS:
2434     case QEMU_IFLA_INET6_ICMP6STATS:
2435         u64 = NLA_DATA(nlattr);
2436         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2437              i++) {
2438             u64[i] = tswap64(u64[i]);
2439         }
2440         break;
2441     default:
2442         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2443     }
2444     return 0;
2445 }
2446 
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448                                                     void *context)
2449 {
2450     switch (nlattr->nla_type) {
2451     case AF_INET:
2452         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453                                               NULL,
2454                                              host_to_target_data_inet_nlattr);
2455     case AF_INET6:
2456         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457                                               NULL,
2458                                              host_to_target_data_inet6_nlattr);
2459     default:
2460         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461         break;
2462     }
2463     return 0;
2464 }
2465 
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467                                                void *context)
2468 {
2469     uint32_t *u32;
2470 
2471     switch (nlattr->nla_type) {
2472     /* uint8_t */
2473     case QEMU_IFLA_XDP_ATTACHED:
2474         break;
2475     /* uint32_t */
2476     case QEMU_IFLA_XDP_PROG_ID:
2477         u32 = NLA_DATA(nlattr);
2478         *u32 = tswap32(*u32);
2479         break;
2480     default:
2481         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482         break;
2483     }
2484     return 0;
2485 }
2486 
2487 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2488 {
2489     uint32_t *u32;
2490     struct rtnl_link_stats *st;
2491     struct rtnl_link_stats64 *st64;
2492     struct rtnl_link_ifmap *map;
2493     struct linkinfo_context li_context;
2494 
2495     switch (rtattr->rta_type) {
2496     /* binary stream */
2497     case QEMU_IFLA_ADDRESS:
2498     case QEMU_IFLA_BROADCAST:
2499     /* string */
2500     case QEMU_IFLA_IFNAME:
2501     case QEMU_IFLA_QDISC:
2502         break;
2503     /* uint8_t */
2504     case QEMU_IFLA_OPERSTATE:
2505     case QEMU_IFLA_LINKMODE:
2506     case QEMU_IFLA_CARRIER:
2507     case QEMU_IFLA_PROTO_DOWN:
2508         break;
2509     /* uint32_t */
2510     case QEMU_IFLA_MTU:
2511     case QEMU_IFLA_LINK:
2512     case QEMU_IFLA_WEIGHT:
2513     case QEMU_IFLA_TXQLEN:
2514     case QEMU_IFLA_CARRIER_CHANGES:
2515     case QEMU_IFLA_NUM_RX_QUEUES:
2516     case QEMU_IFLA_NUM_TX_QUEUES:
2517     case QEMU_IFLA_PROMISCUITY:
2518     case QEMU_IFLA_EXT_MASK:
2519     case QEMU_IFLA_LINK_NETNSID:
2520     case QEMU_IFLA_GROUP:
2521     case QEMU_IFLA_MASTER:
2522     case QEMU_IFLA_NUM_VF:
2523     case QEMU_IFLA_GSO_MAX_SEGS:
2524     case QEMU_IFLA_GSO_MAX_SIZE:
2525     case QEMU_IFLA_CARRIER_UP_COUNT:
2526     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2527         u32 = RTA_DATA(rtattr);
2528         *u32 = tswap32(*u32);
2529         break;
2530     /* struct rtnl_link_stats */
2531     case QEMU_IFLA_STATS:
2532         st = RTA_DATA(rtattr);
2533         st->rx_packets = tswap32(st->rx_packets);
2534         st->tx_packets = tswap32(st->tx_packets);
2535         st->rx_bytes = tswap32(st->rx_bytes);
2536         st->tx_bytes = tswap32(st->tx_bytes);
2537         st->rx_errors = tswap32(st->rx_errors);
2538         st->tx_errors = tswap32(st->tx_errors);
2539         st->rx_dropped = tswap32(st->rx_dropped);
2540         st->tx_dropped = tswap32(st->tx_dropped);
2541         st->multicast = tswap32(st->multicast);
2542         st->collisions = tswap32(st->collisions);
2543 
2544         /* detailed rx_errors: */
2545         st->rx_length_errors = tswap32(st->rx_length_errors);
2546         st->rx_over_errors = tswap32(st->rx_over_errors);
2547         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2548         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2549         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2550         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2551 
2552         /* detailed tx_errors */
2553         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2554         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2555         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2556         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2557         st->tx_window_errors = tswap32(st->tx_window_errors);
2558 
2559         /* for cslip etc */
2560         st->rx_compressed = tswap32(st->rx_compressed);
2561         st->tx_compressed = tswap32(st->tx_compressed);
2562         break;
2563     /* struct rtnl_link_stats64 */
2564     case QEMU_IFLA_STATS64:
2565         st64 = RTA_DATA(rtattr);
2566         st64->rx_packets = tswap64(st64->rx_packets);
2567         st64->tx_packets = tswap64(st64->tx_packets);
2568         st64->rx_bytes = tswap64(st64->rx_bytes);
2569         st64->tx_bytes = tswap64(st64->tx_bytes);
2570         st64->rx_errors = tswap64(st64->rx_errors);
2571         st64->tx_errors = tswap64(st64->tx_errors);
2572         st64->rx_dropped = tswap64(st64->rx_dropped);
2573         st64->tx_dropped = tswap64(st64->tx_dropped);
2574         st64->multicast = tswap64(st64->multicast);
2575         st64->collisions = tswap64(st64->collisions);
2576 
2577         /* detailed rx_errors: */
2578         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2579         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2580         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2581         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2582         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2583         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2584 
2585         /* detailed tx_errors */
2586         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2587         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2588         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2589         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2590         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2591 
2592         /* for cslip etc */
2593         st64->rx_compressed = tswap64(st64->rx_compressed);
2594         st64->tx_compressed = tswap64(st64->tx_compressed);
2595         break;
2596     /* struct rtnl_link_ifmap */
2597     case QEMU_IFLA_MAP:
2598         map = RTA_DATA(rtattr);
2599         map->mem_start = tswap64(map->mem_start);
2600         map->mem_end = tswap64(map->mem_end);
2601         map->base_addr = tswap64(map->base_addr);
2602         map->irq = tswap16(map->irq);
2603         break;
2604     /* nested */
2605     case QEMU_IFLA_LINKINFO:
2606         memset(&li_context, 0, sizeof(li_context));
2607         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2608                                               &li_context,
2609                                            host_to_target_data_linkinfo_nlattr);
2610     case QEMU_IFLA_AF_SPEC:
2611         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2612                                               NULL,
2613                                              host_to_target_data_spec_nlattr);
2614     case QEMU_IFLA_XDP:
2615         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2616                                               NULL,
2617                                                 host_to_target_data_xdp_nlattr);
2618     default:
2619         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2620         break;
2621     }
2622     return 0;
2623 }
2624 
2625 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2626 {
2627     uint32_t *u32;
2628     struct ifa_cacheinfo *ci;
2629 
2630     switch (rtattr->rta_type) {
2631     /* binary: depends on family type */
2632     case IFA_ADDRESS:
2633     case IFA_LOCAL:
2634         break;
2635     /* string */
2636     case IFA_LABEL:
2637         break;
2638     /* u32 */
2639     case IFA_FLAGS:
2640     case IFA_BROADCAST:
2641         u32 = RTA_DATA(rtattr);
2642         *u32 = tswap32(*u32);
2643         break;
2644     /* struct ifa_cacheinfo */
2645     case IFA_CACHEINFO:
2646         ci = RTA_DATA(rtattr);
2647         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2648         ci->ifa_valid = tswap32(ci->ifa_valid);
2649         ci->cstamp = tswap32(ci->cstamp);
2650         ci->tstamp = tswap32(ci->tstamp);
2651         break;
2652     default:
2653         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2654         break;
2655     }
2656     return 0;
2657 }
2658 
2659 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2660 {
2661     uint32_t *u32;
2662     switch (rtattr->rta_type) {
2663     /* binary: depends on family type */
2664     case RTA_GATEWAY:
2665     case RTA_DST:
2666     case RTA_PREFSRC:
2667         break;
2668     /* u32 */
2669     case RTA_PRIORITY:
2670     case RTA_TABLE:
2671     case RTA_OIF:
2672         u32 = RTA_DATA(rtattr);
2673         *u32 = tswap32(*u32);
2674         break;
2675     default:
2676         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2677         break;
2678     }
2679     return 0;
2680 }
2681 
2682 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2683                                          uint32_t rtattr_len)
2684 {
2685     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2686                                           host_to_target_data_link_rtattr);
2687 }
2688 
2689 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2690                                          uint32_t rtattr_len)
2691 {
2692     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2693                                           host_to_target_data_addr_rtattr);
2694 }
2695 
2696 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2697                                          uint32_t rtattr_len)
2698 {
2699     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2700                                           host_to_target_data_route_rtattr);
2701 }
2702 
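     /* Convert the fixed part of a host rtnetlink message (link, address
      * or route) and then its attribute list into guest byte order.
      */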
2703 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2704 {
2705     uint32_t nlmsg_len;
2706     struct ifinfomsg *ifi;
2707     struct ifaddrmsg *ifa;
2708     struct rtmsg *rtm;
2709 
2710     nlmsg_len = nlh->nlmsg_len;
2711     switch (nlh->nlmsg_type) {
2712     case RTM_NEWLINK:
2713     case RTM_DELLINK:
2714     case RTM_GETLINK:
2715         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2716             ifi = NLMSG_DATA(nlh);
2717             ifi->ifi_type = tswap16(ifi->ifi_type);
2718             ifi->ifi_index = tswap32(ifi->ifi_index);
2719             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2720             ifi->ifi_change = tswap32(ifi->ifi_change);
2721             host_to_target_link_rtattr(IFLA_RTA(ifi),
2722                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2723         }
2724         break;
2725     case RTM_NEWADDR:
2726     case RTM_DELADDR:
2727     case RTM_GETADDR:
2728         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2729             ifa = NLMSG_DATA(nlh);
2730             ifa->ifa_index = tswap32(ifa->ifa_index);
2731             host_to_target_addr_rtattr(IFA_RTA(ifa),
2732                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2733         }
2734         break;
2735     case RTM_NEWROUTE:
2736     case RTM_DELROUTE:
2737     case RTM_GETROUTE:
2738         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2739             rtm = NLMSG_DATA(nlh);
2740             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2741             host_to_target_route_rtattr(RTM_RTA(rtm),
2742                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2743         }
2744         break;
2745     default:
2746         return -TARGET_EINVAL;
2747     }
2748     return 0;
2749 }
2750 
2751 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2752                                                   size_t len)
2753 {
2754     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2755 }
2756 
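     /* Guest-to-host counterpart of the rtattr walk above: attribute
      * headers are swapped first, then the payload callback runs.
      */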
2757 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2758                                                size_t len,
2759                                                abi_long (*target_to_host_rtattr)
2760                                                         (struct rtattr *))
2761 {
2762     abi_long ret;
2763 
2764     while (len >= sizeof(struct rtattr)) {
2765         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2766             tswap16(rtattr->rta_len) > len) {
2767             break;
2768         }
2769         rtattr->rta_len = tswap16(rtattr->rta_len);
2770         rtattr->rta_type = tswap16(rtattr->rta_type);
2771         ret = target_to_host_rtattr(rtattr);
2772         if (ret < 0) {
2773             return ret;
2774         }
2775         len -= RTA_ALIGN(rtattr->rta_len);
2776         rtattr = (struct rtattr *)(((char *)rtattr) +
2777                  RTA_ALIGN(rtattr->rta_len));
2778     }
2779     return 0;
2780 }
2781 
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2783 {
2784     switch (rtattr->rta_type) {
2785     default:
2786         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787         break;
2788     }
2789     return 0;
2790 }
2791 
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2793 {
2794     switch (rtattr->rta_type) {
2795     /* binary: depends on family type */
2796     case IFA_LOCAL:
2797     case IFA_ADDRESS:
2798         break;
2799     default:
2800         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801         break;
2802     }
2803     return 0;
2804 }
2805 
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2807 {
2808     uint32_t *u32;
2809     switch (rtattr->rta_type) {
2810     /* binary: depends on family type */
2811     case RTA_DST:
2812     case RTA_SRC:
2813     case RTA_GATEWAY:
2814         break;
2815     /* u32 */
2816     case RTA_PRIORITY:
2817     case RTA_OIF:
2818         u32 = RTA_DATA(rtattr);
2819         *u32 = tswap32(*u32);
2820         break;
2821     default:
2822         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823         break;
2824     }
2825     return 0;
2826 }
2827 
2828 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2829                                        uint32_t rtattr_len)
2830 {
2831     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2832                                    target_to_host_data_link_rtattr);
2833 }
2834 
2835 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2836                                      uint32_t rtattr_len)
2837 {
2838     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2839                                    target_to_host_data_addr_rtattr);
2840 }
2841 
2842 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2843                                      uint32_t rtattr_len)
2844 {
2845     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2846                                    target_to_host_data_route_rtattr);
2847 }
2848 
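     /* Convert a guest rtnetlink request (link, address or route message)
      * into host byte order before it is handed to the host kernel.
      */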
2849 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2850 {
2851     struct ifinfomsg *ifi;
2852     struct ifaddrmsg *ifa;
2853     struct rtmsg *rtm;
2854 
2855     switch (nlh->nlmsg_type) {
2856     case RTM_GETLINK:
2857         break;
2858     case RTM_NEWLINK:
2859     case RTM_DELLINK:
2860         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2861             ifi = NLMSG_DATA(nlh);
2862             ifi->ifi_type = tswap16(ifi->ifi_type);
2863             ifi->ifi_index = tswap32(ifi->ifi_index);
2864             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2865             ifi->ifi_change = tswap32(ifi->ifi_change);
2866             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2867                                        NLMSG_LENGTH(sizeof(*ifi)));
2868         }
2869         break;
2870     case RTM_GETADDR:
2871     case RTM_NEWADDR:
2872     case RTM_DELADDR:
2873         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2874             ifa = NLMSG_DATA(nlh);
2875             ifa->ifa_index = tswap32(ifa->ifa_index);
2876             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2877                                        NLMSG_LENGTH(sizeof(*ifa)));
2878         }
2879         break;
2880     case RTM_GETROUTE:
2881         break;
2882     case RTM_NEWROUTE:
2883     case RTM_DELROUTE:
2884         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2885             rtm = NLMSG_DATA(nlh);
2886             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2887             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2888                                         NLMSG_LENGTH(sizeof(*rtm)));
2889         }
2890         break;
2891     default:
2892         return -TARGET_EOPNOTSUPP;
2893     }
2894     return 0;
2895 }
2896 
2897 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2898 {
2899     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2900 }
2901 #endif /* CONFIG_RTNETLINK */
2902 
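     /* Audit netlink messages: no host message types are converted yet,
      * and only the user-message ranges are accepted from the guest.
      */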
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2904 {
2905     switch (nlh->nlmsg_type) {
2906     default:
2907         gemu_log("Unknown host audit message type %d\n",
2908                  nlh->nlmsg_type);
2909         return -TARGET_EINVAL;
2910     }
2911     return 0;
2912 }
2913 
2914 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2915                                                   size_t len)
2916 {
2917     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2918 }
2919 
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2921 {
2922     switch (nlh->nlmsg_type) {
2923     case AUDIT_USER:
2924     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926         break;
2927     default:
2928         gemu_log("Unknown target audit message type %d\n",
2929                  nlh->nlmsg_type);
2930         return -TARGET_EINVAL;
2931     }
2932 
2933     return 0;
2934 }
2935 
2936 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2937 {
2938     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2939 }
2940 
2941 /* do_setsockopt() must return target values and target errnos. */
2942 static abi_long do_setsockopt(int sockfd, int level, int optname,
2943                               abi_ulong optval_addr, socklen_t optlen)
2944 {
2945     abi_long ret;
2946     int val;
2947     struct ip_mreqn *ip_mreq;
2948     struct ip_mreq_source *ip_mreq_source;
2949 
2950     switch(level) {
2951     case SOL_TCP:
2952         /* TCP options all take an 'int' value.  */
2953         if (optlen < sizeof(uint32_t))
2954             return -TARGET_EINVAL;
2955 
2956         if (get_user_u32(val, optval_addr))
2957             return -TARGET_EFAULT;
2958         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2959         break;
2960     case SOL_IP:
2961         switch(optname) {
2962         case IP_TOS:
2963         case IP_TTL:
2964         case IP_HDRINCL:
2965         case IP_ROUTER_ALERT:
2966         case IP_RECVOPTS:
2967         case IP_RETOPTS:
2968         case IP_PKTINFO:
2969         case IP_MTU_DISCOVER:
2970         case IP_RECVERR:
2971         case IP_RECVTTL:
2972         case IP_RECVTOS:
2973 #ifdef IP_FREEBIND
2974         case IP_FREEBIND:
2975 #endif
2976         case IP_MULTICAST_TTL:
2977         case IP_MULTICAST_LOOP:
2978             val = 0;
2979             if (optlen >= sizeof(uint32_t)) {
2980                 if (get_user_u32(val, optval_addr))
2981                     return -TARGET_EFAULT;
2982             } else if (optlen >= 1) {
2983                 if (get_user_u8(val, optval_addr))
2984                     return -TARGET_EFAULT;
2985             }
2986             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2987             break;
2988         case IP_ADD_MEMBERSHIP:
2989         case IP_DROP_MEMBERSHIP:
2990             if (optlen < sizeof (struct target_ip_mreq) ||
2991                 optlen > sizeof (struct target_ip_mreqn))
2992                 return -TARGET_EINVAL;
2993 
2994             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2995             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2996             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2997             break;
2998 
2999         case IP_BLOCK_SOURCE:
3000         case IP_UNBLOCK_SOURCE:
3001         case IP_ADD_SOURCE_MEMBERSHIP:
3002         case IP_DROP_SOURCE_MEMBERSHIP:
3003             if (optlen != sizeof (struct target_ip_mreq_source))
3004                 return -TARGET_EINVAL;
3005 
3006             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3007             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3008             unlock_user (ip_mreq_source, optval_addr, 0);
3009             break;
3010 
3011         default:
3012             goto unimplemented;
3013         }
3014         break;
3015     case SOL_IPV6:
3016         switch (optname) {
3017         case IPV6_MTU_DISCOVER:
3018         case IPV6_MTU:
3019         case IPV6_V6ONLY:
3020         case IPV6_RECVPKTINFO:
3021         case IPV6_UNICAST_HOPS:
3022         case IPV6_MULTICAST_HOPS:
3023         case IPV6_MULTICAST_LOOP:
3024         case IPV6_RECVERR:
3025         case IPV6_RECVHOPLIMIT:
3026         case IPV6_2292HOPLIMIT:
3027         case IPV6_CHECKSUM:
3028             val = 0;
3029             if (optlen < sizeof(uint32_t)) {
3030                 return -TARGET_EINVAL;
3031             }
3032             if (get_user_u32(val, optval_addr)) {
3033                 return -TARGET_EFAULT;
3034             }
3035             ret = get_errno(setsockopt(sockfd, level, optname,
3036                                        &val, sizeof(val)));
3037             break;
3038         case IPV6_PKTINFO:
3039         {
3040             struct in6_pktinfo pki;
3041 
3042             if (optlen < sizeof(pki)) {
3043                 return -TARGET_EINVAL;
3044             }
3045 
3046             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3047                 return -TARGET_EFAULT;
3048             }
3049 
3050             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3051 
3052             ret = get_errno(setsockopt(sockfd, level, optname,
3053                                        &pki, sizeof(pki)));
3054             break;
3055         }
3056         default:
3057             goto unimplemented;
3058         }
3059         break;
3060     case SOL_ICMPV6:
3061         switch (optname) {
3062         case ICMPV6_FILTER:
3063         {
3064             struct icmp6_filter icmp6f;
3065 
3066             if (optlen > sizeof(icmp6f)) {
3067                 optlen = sizeof(icmp6f);
3068             }
3069 
3070             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3071                 return -TARGET_EFAULT;
3072             }
3073 
3074             for (val = 0; val < 8; val++) {
3075                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3076             }
3077 
3078             ret = get_errno(setsockopt(sockfd, level, optname,
3079                                        &icmp6f, optlen));
3080             break;
3081         }
3082         default:
3083             goto unimplemented;
3084         }
3085         break;
3086     case SOL_RAW:
3087         switch (optname) {
3088         case ICMP_FILTER:
3089         case IPV6_CHECKSUM:
3090             /* these take a u32 value */
3091             if (optlen < sizeof(uint32_t)) {
3092                 return -TARGET_EINVAL;
3093             }
3094 
3095             if (get_user_u32(val, optval_addr)) {
3096                 return -TARGET_EFAULT;
3097             }
3098             ret = get_errno(setsockopt(sockfd, level, optname,
3099                                        &val, sizeof(val)));
3100             break;
3101 
3102         default:
3103             goto unimplemented;
3104         }
3105         break;
3106     case TARGET_SOL_SOCKET:
3107         switch (optname) {
3108         case TARGET_SO_RCVTIMEO:
3109         {
3110                 struct timeval tv;
3111 
3112                 optname = SO_RCVTIMEO;
3113 
3114 set_timeout:
3115                 if (optlen != sizeof(struct target_timeval)) {
3116                     return -TARGET_EINVAL;
3117                 }
3118 
3119                 if (copy_from_user_timeval(&tv, optval_addr)) {
3120                     return -TARGET_EFAULT;
3121                 }
3122 
3123                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3124                                 &tv, sizeof(tv)));
3125                 return ret;
3126         }
3127         case TARGET_SO_SNDTIMEO:
3128                 optname = SO_SNDTIMEO;
3129                 goto set_timeout;
3130         case TARGET_SO_ATTACH_FILTER:
3131         {
3132                 struct target_sock_fprog *tfprog;
3133                 struct target_sock_filter *tfilter;
3134                 struct sock_fprog fprog;
3135                 struct sock_filter *filter;
3136                 int i;
3137 
3138                 if (optlen != sizeof(*tfprog)) {
3139                     return -TARGET_EINVAL;
3140                 }
3141                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3142                     return -TARGET_EFAULT;
3143                 }
3144                 if (!lock_user_struct(VERIFY_READ, tfilter,
3145                                       tswapal(tfprog->filter), 0)) {
3146                     unlock_user_struct(tfprog, optval_addr, 1);
3147                     return -TARGET_EFAULT;
3148                 }
3149 
3150                 fprog.len = tswap16(tfprog->len);
3151                 filter = g_try_new(struct sock_filter, fprog.len);
3152                 if (filter == NULL) {
3153                     unlock_user_struct(tfilter, tfprog->filter, 1);
3154                     unlock_user_struct(tfprog, optval_addr, 1);
3155                     return -TARGET_ENOMEM;
3156                 }
3157                 for (i = 0; i < fprog.len; i++) {
3158                     filter[i].code = tswap16(tfilter[i].code);
3159                     filter[i].jt = tfilter[i].jt;
3160                     filter[i].jf = tfilter[i].jf;
3161                     filter[i].k = tswap32(tfilter[i].k);
3162                 }
3163                 fprog.filter = filter;
3164 
3165                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3166                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3167                 g_free(filter);
3168 
3169                 unlock_user_struct(tfilter, tfprog->filter, 1);
3170                 unlock_user_struct(tfprog, optval_addr, 1);
3171                 return ret;
3172         }
3173 	case TARGET_SO_BINDTODEVICE:
3174 	{
3175 		char *dev_ifname, *addr_ifname;
3176 
3177 		if (optlen > IFNAMSIZ - 1) {
3178 		    optlen = IFNAMSIZ - 1;
3179 		}
3180 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3181 		if (!dev_ifname) {
3182 		    return -TARGET_EFAULT;
3183 		}
3184 		optname = SO_BINDTODEVICE;
3185 		addr_ifname = alloca(IFNAMSIZ);
3186 		memcpy(addr_ifname, dev_ifname, optlen);
3187 		addr_ifname[optlen] = 0;
3188 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3189                                            addr_ifname, optlen));
3190 		unlock_user (dev_ifname, optval_addr, 0);
3191 		return ret;
3192 	}
3193             /* Options with 'int' argument.  */
3194         case TARGET_SO_DEBUG:
3195 		optname = SO_DEBUG;
3196 		break;
3197         case TARGET_SO_REUSEADDR:
3198 		optname = SO_REUSEADDR;
3199 		break;
3200         case TARGET_SO_TYPE:
3201 		optname = SO_TYPE;
3202 		break;
3203         case TARGET_SO_ERROR:
3204 		optname = SO_ERROR;
3205 		break;
3206         case TARGET_SO_DONTROUTE:
3207 		optname = SO_DONTROUTE;
3208 		break;
3209         case TARGET_SO_BROADCAST:
3210 		optname = SO_BROADCAST;
3211 		break;
3212         case TARGET_SO_SNDBUF:
3213 		optname = SO_SNDBUF;
3214 		break;
3215         case TARGET_SO_SNDBUFFORCE:
3216                 optname = SO_SNDBUFFORCE;
3217                 break;
3218         case TARGET_SO_RCVBUF:
3219 		optname = SO_RCVBUF;
3220 		break;
3221         case TARGET_SO_RCVBUFFORCE:
3222                 optname = SO_RCVBUFFORCE;
3223                 break;
3224         case TARGET_SO_KEEPALIVE:
3225 		optname = SO_KEEPALIVE;
3226 		break;
3227         case TARGET_SO_OOBINLINE:
3228 		optname = SO_OOBINLINE;
3229 		break;
3230         case TARGET_SO_NO_CHECK:
3231 		optname = SO_NO_CHECK;
3232 		break;
3233         case TARGET_SO_PRIORITY:
3234 		optname = SO_PRIORITY;
3235 		break;
3236 #ifdef SO_BSDCOMPAT
3237         case TARGET_SO_BSDCOMPAT:
3238 		optname = SO_BSDCOMPAT;
3239 		break;
3240 #endif
3241         case TARGET_SO_PASSCRED:
3242 		optname = SO_PASSCRED;
3243 		break;
3244         case TARGET_SO_PASSSEC:
3245                 optname = SO_PASSSEC;
3246                 break;
3247         case TARGET_SO_TIMESTAMP:
3248 		optname = SO_TIMESTAMP;
3249 		break;
3250         case TARGET_SO_RCVLOWAT:
3251 		optname = SO_RCVLOWAT;
3252 		break;
3253         default:
3254             goto unimplemented;
3255         }
3256 	if (optlen < sizeof(uint32_t))
3257             return -TARGET_EINVAL;
3258 
3259 	if (get_user_u32(val, optval_addr))
3260             return -TARGET_EFAULT;
3261 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3262         break;
3263     default:
3264     unimplemented:
3265         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3266         ret = -TARGET_ENOPROTOOPT;
3267     }
3268     return ret;
3269 }
3270 
3271 /* do_getsockopt() Must return target values and target errnos. */
3272 static abi_long do_getsockopt(int sockfd, int level, int optname,
3273                               abi_ulong optval_addr, abi_ulong optlen)
3274 {
3275     abi_long ret;
3276     int len, val;
3277     socklen_t lv;
3278 
3279     switch(level) {
3280     case TARGET_SOL_SOCKET:
3281         level = SOL_SOCKET;
3282         switch (optname) {
3283         /* These don't just return a single integer */
3284         case TARGET_SO_LINGER:
3285         case TARGET_SO_RCVTIMEO:
3286         case TARGET_SO_SNDTIMEO:
3287         case TARGET_SO_PEERNAME:
3288             goto unimplemented;
3289         case TARGET_SO_PEERCRED: {
3290             struct ucred cr;
3291             socklen_t crlen;
3292             struct target_ucred *tcr;
3293 
3294             if (get_user_u32(len, optlen)) {
3295                 return -TARGET_EFAULT;
3296             }
3297             if (len < 0) {
3298                 return -TARGET_EINVAL;
3299             }
3300 
3301             crlen = sizeof(cr);
3302             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3303                                        &cr, &crlen));
3304             if (ret < 0) {
3305                 return ret;
3306             }
3307             if (len > crlen) {
3308                 len = crlen;
3309             }
3310             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3311                 return -TARGET_EFAULT;
3312             }
3313             __put_user(cr.pid, &tcr->pid);
3314             __put_user(cr.uid, &tcr->uid);
3315             __put_user(cr.gid, &tcr->gid);
3316             unlock_user_struct(tcr, optval_addr, 1);
3317             if (put_user_u32(len, optlen)) {
3318                 return -TARGET_EFAULT;
3319             }
3320             break;
3321         }
3322         /* Options with 'int' argument.  */
3323         case TARGET_SO_DEBUG:
3324             optname = SO_DEBUG;
3325             goto int_case;
3326         case TARGET_SO_REUSEADDR:
3327             optname = SO_REUSEADDR;
3328             goto int_case;
3329         case TARGET_SO_TYPE:
3330             optname = SO_TYPE;
3331             goto int_case;
3332         case TARGET_SO_ERROR:
3333             optname = SO_ERROR;
3334             goto int_case;
3335         case TARGET_SO_DONTROUTE:
3336             optname = SO_DONTROUTE;
3337             goto int_case;
3338         case TARGET_SO_BROADCAST:
3339             optname = SO_BROADCAST;
3340             goto int_case;
3341         case TARGET_SO_SNDBUF:
3342             optname = SO_SNDBUF;
3343             goto int_case;
3344         case TARGET_SO_RCVBUF:
3345             optname = SO_RCVBUF;
3346             goto int_case;
3347         case TARGET_SO_KEEPALIVE:
3348             optname = SO_KEEPALIVE;
3349             goto int_case;
3350         case TARGET_SO_OOBINLINE:
3351             optname = SO_OOBINLINE;
3352             goto int_case;
3353         case TARGET_SO_NO_CHECK:
3354             optname = SO_NO_CHECK;
3355             goto int_case;
3356         case TARGET_SO_PRIORITY:
3357             optname = SO_PRIORITY;
3358             goto int_case;
3359 #ifdef SO_BSDCOMPAT
3360         case TARGET_SO_BSDCOMPAT:
3361             optname = SO_BSDCOMPAT;
3362             goto int_case;
3363 #endif
3364         case TARGET_SO_PASSCRED:
3365             optname = SO_PASSCRED;
3366             goto int_case;
3367         case TARGET_SO_TIMESTAMP:
3368             optname = SO_TIMESTAMP;
3369             goto int_case;
3370         case TARGET_SO_RCVLOWAT:
3371             optname = SO_RCVLOWAT;
3372             goto int_case;
3373         case TARGET_SO_ACCEPTCONN:
3374             optname = SO_ACCEPTCONN;
3375             goto int_case;
3376         default:
3377             goto int_case;
3378         }
3379         break;
3380     case SOL_TCP:
3381         /* TCP options all take an 'int' value.  */
3382     int_case:
3383         if (get_user_u32(len, optlen))
3384             return -TARGET_EFAULT;
3385         if (len < 0)
3386             return -TARGET_EINVAL;
3387         lv = sizeof(lv);
3388         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3389         if (ret < 0)
3390             return ret;
3391         if (optname == SO_TYPE) {
3392             val = host_to_target_sock_type(val);
3393         }
3394         if (len > lv)
3395             len = lv;
3396         if (len == 4) {
3397             if (put_user_u32(val, optval_addr))
3398                 return -TARGET_EFAULT;
3399         } else {
3400             if (put_user_u8(val, optval_addr))
3401                 return -TARGET_EFAULT;
3402         }
3403         if (put_user_u32(len, optlen))
3404             return -TARGET_EFAULT;
3405         break;
3406     case SOL_IP:
3407         switch(optname) {
3408         case IP_TOS:
3409         case IP_TTL:
3410         case IP_HDRINCL:
3411         case IP_ROUTER_ALERT:
3412         case IP_RECVOPTS:
3413         case IP_RETOPTS:
3414         case IP_PKTINFO:
3415         case IP_MTU_DISCOVER:
3416         case IP_RECVERR:
3417         case IP_RECVTOS:
3418 #ifdef IP_FREEBIND
3419         case IP_FREEBIND:
3420 #endif
3421         case IP_MULTICAST_TTL:
3422         case IP_MULTICAST_LOOP:
3423             if (get_user_u32(len, optlen))
3424                 return -TARGET_EFAULT;
3425             if (len < 0)
3426                 return -TARGET_EINVAL;
3427             lv = sizeof(lv);
3428             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3429             if (ret < 0)
3430                 return ret;
3431             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3432                 len = 1;
3433                 if (put_user_u32(len, optlen)
3434                     || put_user_u8(val, optval_addr))
3435                     return -TARGET_EFAULT;
3436             } else {
3437                 if (len > sizeof(int))
3438                     len = sizeof(int);
3439                 if (put_user_u32(len, optlen)
3440                     || put_user_u32(val, optval_addr))
3441                     return -TARGET_EFAULT;
3442             }
3443             break;
3444         default:
3445             ret = -TARGET_ENOPROTOOPT;
3446             break;
3447         }
3448         break;
3449     default:
3450     unimplemented:
3451         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3452                  level, optname);
3453         ret = -TARGET_EOPNOTSUPP;
3454         break;
3455     }
3456     return ret;
3457 }
3458 
3459 /* Convert a target low/high pair representing a file offset into the host
3460  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3461  * as the kernel doesn't handle them either.
3462  */
3463 static void target_to_host_low_high(abi_ulong tlow,
3464                                     abi_ulong thigh,
3465                                     unsigned long *hlow,
3466                                     unsigned long *hhigh)
3467 {
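    /* The shift by TARGET_LONG_BITS is done as two half-width shifts so that
     * we never shift an integer by its full width (undefined behaviour in C)
     * when TARGET_LONG_BITS == 64; the HOST_LONG_BITS shifts below use the
     * same trick.
     */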
3468     uint64_t off = tlow |
3469         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3470         TARGET_LONG_BITS / 2;
3471 
3472     *hlow = off;
3473     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3474 }
3475 
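/* Copy a target iovec array into host form, locking each non-empty buffer
 * into host memory.  Returns NULL with errno set on failure; on success the
 * caller must release the result with unlock_iovec().
 */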
3476 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3477                                 abi_ulong count, int copy)
3478 {
3479     struct target_iovec *target_vec;
3480     struct iovec *vec;
3481     abi_ulong total_len, max_len;
3482     int i;
3483     int err = 0;
3484     bool bad_address = false;
3485 
3486     if (count == 0) {
3487         errno = 0;
3488         return NULL;
3489     }
3490     if (count > IOV_MAX) {
3491         errno = EINVAL;
3492         return NULL;
3493     }
3494 
3495     vec = g_try_new0(struct iovec, count);
3496     if (vec == NULL) {
3497         errno = ENOMEM;
3498         return NULL;
3499     }
3500 
3501     target_vec = lock_user(VERIFY_READ, target_addr,
3502                            count * sizeof(struct target_iovec), 1);
3503     if (target_vec == NULL) {
3504         err = EFAULT;
3505         goto fail2;
3506     }
3507 
3508     /* ??? If host page size > target page size, this will result in a
3509        value larger than what we can actually support.  */
3510     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3511     total_len = 0;
3512 
3513     for (i = 0; i < count; i++) {
3514         abi_ulong base = tswapal(target_vec[i].iov_base);
3515         abi_long len = tswapal(target_vec[i].iov_len);
3516 
3517         if (len < 0) {
3518             err = EINVAL;
3519             goto fail;
3520         } else if (len == 0) {
3521             /* Zero length pointer is ignored.  */
3522             vec[i].iov_base = 0;
3523         } else {
3524             vec[i].iov_base = lock_user(type, base, len, copy);
3525             /* If the first buffer pointer is bad, this is a fault.  But
3526              * subsequent bad buffers will result in a partial write; this
3527              * is realized by filling the vector with null pointers and
3528              * zero lengths. */
3529             if (!vec[i].iov_base) {
3530                 if (i == 0) {
3531                     err = EFAULT;
3532                     goto fail;
3533                 } else {
3534                     bad_address = true;
3535                 }
3536             }
3537             if (bad_address) {
3538                 len = 0;
3539             }
3540             if (len > max_len - total_len) {
3541                 len = max_len - total_len;
3542             }
3543         }
3544         vec[i].iov_len = len;
3545         total_len += len;
3546     }
3547 
3548     unlock_user(target_vec, target_addr, 0);
3549     return vec;
3550 
3551  fail:
3552     while (--i >= 0) {
3553         if (tswapal(target_vec[i].iov_len) > 0) {
3554             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3555         }
3556     }
3557     unlock_user(target_vec, target_addr, 0);
3558  fail2:
3559     g_free(vec);
3560     errno = err;
3561     return NULL;
3562 }
3563 
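/* Release the buffers locked by lock_iovec(); when 'copy' is set, data the
 * host wrote into the buffers is copied back out to the guest.
 */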
3564 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3565                          abi_ulong count, int copy)
3566 {
3567     struct target_iovec *target_vec;
3568     int i;
3569 
3570     target_vec = lock_user(VERIFY_READ, target_addr,
3571                            count * sizeof(struct target_iovec), 1);
3572     if (target_vec) {
3573         for (i = 0; i < count; i++) {
3574             abi_ulong base = tswapal(target_vec[i].iov_base);
3575             abi_long len = tswapal(target_vec[i].iov_len);
3576             if (len < 0) {
3577                 break;
3578             }
3579             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3580         }
3581         unlock_user(target_vec, target_addr, 0);
3582     }
3583 
3584     g_free(vec);
3585 }
3586 
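/* Translate the target's socket type and its SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags into host values; returns -TARGET_EINVAL if a requested flag cannot
 * be represented on this host, 0 otherwise.
 */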
3587 static inline int target_to_host_sock_type(int *type)
3588 {
3589     int host_type = 0;
3590     int target_type = *type;
3591 
3592     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3593     case TARGET_SOCK_DGRAM:
3594         host_type = SOCK_DGRAM;
3595         break;
3596     case TARGET_SOCK_STREAM:
3597         host_type = SOCK_STREAM;
3598         break;
3599     default:
3600         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3601         break;
3602     }
3603     if (target_type & TARGET_SOCK_CLOEXEC) {
3604 #if defined(SOCK_CLOEXEC)
3605         host_type |= SOCK_CLOEXEC;
3606 #else
3607         return -TARGET_EINVAL;
3608 #endif
3609     }
3610     if (target_type & TARGET_SOCK_NONBLOCK) {
3611 #if defined(SOCK_NONBLOCK)
3612         host_type |= SOCK_NONBLOCK;
3613 #elif !defined(O_NONBLOCK)
3614         return -TARGET_EINVAL;
3615 #endif
3616     }
3617     *type = host_type;
3618     return 0;
3619 }
3620 
3621 /* Try to emulate socket type flags after socket creation.  */
3622 static int sock_flags_fixup(int fd, int target_type)
3623 {
3624 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3625     if (target_type & TARGET_SOCK_NONBLOCK) {
3626         int flags = fcntl(fd, F_GETFL);
3627         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3628             close(fd);
3629             return -TARGET_EINVAL;
3630         }
3631     }
3632 #endif
3633     return fd;
3634 }
3635 
3636 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3637                                                abi_ulong target_addr,
3638                                                socklen_t len)
3639 {
3640     struct sockaddr *addr = host_addr;
3641     struct target_sockaddr *target_saddr;
3642 
3643     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3644     if (!target_saddr) {
3645         return -TARGET_EFAULT;
3646     }
3647 
3648     memcpy(addr, target_saddr, len);
3649     addr->sa_family = tswap16(target_saddr->sa_family);
3650     /* spkt_protocol is big-endian */
3651 
3652     unlock_user(target_saddr, target_addr, 0);
3653     return 0;
3654 }
3655 
3656 static TargetFdTrans target_packet_trans = {
3657     .target_to_host_addr = packet_target_to_host_sockaddr,
3658 };
3659 
3660 #ifdef CONFIG_RTNETLINK
3661 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3662 {
3663     abi_long ret;
3664 
3665     ret = target_to_host_nlmsg_route(buf, len);
3666     if (ret < 0) {
3667         return ret;
3668     }
3669 
3670     return len;
3671 }
3672 
3673 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3674 {
3675     abi_long ret;
3676 
3677     ret = host_to_target_nlmsg_route(buf, len);
3678     if (ret < 0) {
3679         return ret;
3680     }
3681 
3682     return len;
3683 }
3684 
3685 static TargetFdTrans target_netlink_route_trans = {
3686     .target_to_host_data = netlink_route_target_to_host,
3687     .host_to_target_data = netlink_route_host_to_target,
3688 };
3689 #endif /* CONFIG_RTNETLINK */
3690 
3691 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3692 {
3693     abi_long ret;
3694 
3695     ret = target_to_host_nlmsg_audit(buf, len);
3696     if (ret < 0) {
3697         return ret;
3698     }
3699 
3700     return len;
3701 }
3702 
3703 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3704 {
3705     abi_long ret;
3706 
3707     ret = host_to_target_nlmsg_audit(buf, len);
3708     if (ret < 0) {
3709         return ret;
3710     }
3711 
3712     return len;
3713 }
3714 
3715 static TargetFdTrans target_netlink_audit_trans = {
3716     .target_to_host_data = netlink_audit_target_to_host,
3717     .host_to_target_data = netlink_audit_host_to_target,
3718 };
3719 
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long do_socket(int domain, int type, int protocol)
3722 {
3723     int target_type = type;
3724     int ret;
3725 
3726     ret = target_to_host_sock_type(&type);
3727     if (ret) {
3728         return ret;
3729     }
3730 
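    /* Only netlink protocols we know how to handle are allowed: ROUTE and
     * AUDIT messages are translated by the fd_trans hooks registered below,
     * and KOBJECT_UEVENT messages are plain strings needing no translation.
     */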
3731     if (domain == PF_NETLINK && !(
3732 #ifdef CONFIG_RTNETLINK
3733          protocol == NETLINK_ROUTE ||
3734 #endif
3735          protocol == NETLINK_KOBJECT_UEVENT ||
3736          protocol == NETLINK_AUDIT)) {
3737         return -EPFNOSUPPORT;
3738     }
3739 
3740     if (domain == AF_PACKET ||
3741         (domain == AF_INET && type == SOCK_PACKET)) {
3742         protocol = tswap16(protocol);
3743     }
3744 
3745     ret = get_errno(socket(domain, type, protocol));
3746     if (ret >= 0) {
3747         ret = sock_flags_fixup(ret, target_type);
3748         if (type == SOCK_PACKET) {
3749             /* Handle an obsolete case:
3750              * if the socket type is SOCK_PACKET, it is bound by name.
3751              */
3752             fd_trans_register(ret, &target_packet_trans);
3753         } else if (domain == PF_NETLINK) {
3754             switch (protocol) {
3755 #ifdef CONFIG_RTNETLINK
3756             case NETLINK_ROUTE:
3757                 fd_trans_register(ret, &target_netlink_route_trans);
3758                 break;
3759 #endif
3760             case NETLINK_KOBJECT_UEVENT:
3761                 /* nothing to do: messages are strings */
3762                 break;
3763             case NETLINK_AUDIT:
3764                 fd_trans_register(ret, &target_netlink_audit_trans);
3765                 break;
3766             default:
3767                 g_assert_not_reached();
3768             }
3769         }
3770     }
3771     return ret;
3772 }
3773 
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3776                         socklen_t addrlen)
3777 {
3778     void *addr;
3779     abi_long ret;
3780 
3781     if ((int)addrlen < 0) {
3782         return -TARGET_EINVAL;
3783     }
3784 
3785     addr = alloca(addrlen+1);
3786 
3787     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3788     if (ret)
3789         return ret;
3790 
3791     return get_errno(bind(sockfd, addr, addrlen));
3792 }
3793 
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3796                            socklen_t addrlen)
3797 {
3798     void *addr;
3799     abi_long ret;
3800 
3801     if ((int)addrlen < 0) {
3802         return -TARGET_EINVAL;
3803     }
3804 
3805     addr = alloca(addrlen+1);
3806 
3807     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3808     if (ret)
3809         return ret;
3810 
3811     return get_errno(safe_connect(sockfd, addr, addrlen));
3812 }
3813 
3814 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3815 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3816                                       int flags, int send)
3817 {
3818     abi_long ret, len;
3819     struct msghdr msg;
3820     abi_ulong count;
3821     struct iovec *vec;
3822     abi_ulong target_vec;
3823 
3824     if (msgp->msg_name) {
3825         msg.msg_namelen = tswap32(msgp->msg_namelen);
3826         msg.msg_name = alloca(msg.msg_namelen+1);
3827         ret = target_to_host_sockaddr(fd, msg.msg_name,
3828                                       tswapal(msgp->msg_name),
3829                                       msg.msg_namelen);
3830         if (ret == -TARGET_EFAULT) {
3831             /* For connected sockets msg_name and msg_namelen must
3832              * be ignored, so returning EFAULT immediately is wrong.
3833              * Instead, pass a bad msg_name to the host kernel, and
3834              * let it decide whether to return EFAULT or not.
3835              */
3836             msg.msg_name = (void *)-1;
3837         } else if (ret) {
3838             goto out2;
3839         }
3840     } else {
3841         msg.msg_name = NULL;
3842         msg.msg_namelen = 0;
3843     }
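    /* Allocate the host control buffer at twice the target's size: the
     * converted control messages can grow, since host cmsg headers and
     * alignment may be larger than the target's (e.g. a 32-bit guest on a
     * 64-bit host).
     */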
3844     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3845     msg.msg_control = alloca(msg.msg_controllen);
3846     memset(msg.msg_control, 0, msg.msg_controllen);
3847 
3848     msg.msg_flags = tswap32(msgp->msg_flags);
3849 
3850     count = tswapal(msgp->msg_iovlen);
3851     target_vec = tswapal(msgp->msg_iov);
3852 
3853     if (count > IOV_MAX) {
3854         /* sendmsg/recvmsg returns a different errno for this condition than
3855          * readv/writev, so we must catch it here before lock_iovec() does.
3856          */
3857         ret = -TARGET_EMSGSIZE;
3858         goto out2;
3859     }
3860 
3861     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3862                      target_vec, count, send);
3863     if (vec == NULL) {
3864         ret = -host_to_target_errno(errno);
3865         goto out2;
3866     }
3867     msg.msg_iovlen = count;
3868     msg.msg_iov = vec;
3869 
3870     if (send) {
3871         if (fd_trans_target_to_host_data(fd)) {
3872             void *host_msg;
3873 
3874             host_msg = g_malloc(msg.msg_iov->iov_len);
3875             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3876             ret = fd_trans_target_to_host_data(fd)(host_msg,
3877                                                    msg.msg_iov->iov_len);
3878             if (ret >= 0) {
3879                 msg.msg_iov->iov_base = host_msg;
3880                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3881             }
3882             g_free(host_msg);
3883         } else {
3884             ret = target_to_host_cmsg(&msg, msgp);
3885             if (ret == 0) {
3886                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3887             }
3888         }
3889     } else {
3890         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3891         if (!is_error(ret)) {
3892             len = ret;
3893             if (fd_trans_host_to_target_data(fd)) {
3894                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3895                                                        len);
3896             } else {
3897                 ret = host_to_target_cmsg(msgp, &msg);
3898             }
3899             if (!is_error(ret)) {
3900                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3901                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3902                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3903                                     msg.msg_name, msg.msg_namelen);
3904                     if (ret) {
3905                         goto out;
3906                     }
3907                 }
3908 
3909                 ret = len;
3910             }
3911         }
3912     }
3913 
3914 out:
3915     unlock_iovec(vec, target_vec, count, !send);
3916 out2:
3917     return ret;
3918 }
3919 
3920 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3921                                int flags, int send)
3922 {
3923     abi_long ret;
3924     struct target_msghdr *msgp;
3925 
3926     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3927                           msgp,
3928                           target_msg,
3929                           send ? 1 : 0)) {
3930         return -TARGET_EFAULT;
3931     }
3932     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3933     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3934     return ret;
3935 }
3936 
3937 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3938  * so it might not have this *mmsg-specific flag either.
3939  */
3940 #ifndef MSG_WAITFORONE
3941 #define MSG_WAITFORONE 0x10000
3942 #endif
3943 
3944 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3945                                 unsigned int vlen, unsigned int flags,
3946                                 int send)
3947 {
3948     struct target_mmsghdr *mmsgp;
3949     abi_long ret = 0;
3950     int i;
3951 
3952     if (vlen > UIO_MAXIOV) {
3953         vlen = UIO_MAXIOV;
3954     }
3955 
3956     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3957     if (!mmsgp) {
3958         return -TARGET_EFAULT;
3959     }
3960 
3961     for (i = 0; i < vlen; i++) {
3962         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3963         if (is_error(ret)) {
3964             break;
3965         }
3966         mmsgp[i].msg_len = tswap32(ret);
3967         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3968         if (flags & MSG_WAITFORONE) {
3969             flags |= MSG_DONTWAIT;
3970         }
3971     }
3972 
3973     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3974 
3975     /* Return number of datagrams sent if we sent any at all;
3976      * otherwise return the error.
3977      */
3978     if (i) {
3979         return i;
3980     }
3981     return ret;
3982 }
3983 
3984 /* do_accept4() Must return target values and target errnos. */
3985 static abi_long do_accept4(int fd, abi_ulong target_addr,
3986                            abi_ulong target_addrlen_addr, int flags)
3987 {
3988     socklen_t addrlen;
3989     void *addr;
3990     abi_long ret;
3991     int host_flags;
3992 
3993     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3994 
3995     if (target_addr == 0) {
3996         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3997     }
3998 
3999     /* Linux returns EINVAL if the addrlen pointer is invalid */
4000     if (get_user_u32(addrlen, target_addrlen_addr))
4001         return -TARGET_EINVAL;
4002 
4003     if ((int)addrlen < 0) {
4004         return -TARGET_EINVAL;
4005     }
4006 
4007     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4008         return -TARGET_EINVAL;
4009 
4010     addr = alloca(addrlen);
4011 
4012     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4013     if (!is_error(ret)) {
4014         host_to_target_sockaddr(target_addr, addr, addrlen);
4015         if (put_user_u32(addrlen, target_addrlen_addr))
4016             ret = -TARGET_EFAULT;
4017     }
4018     return ret;
4019 }
4020 
4021 /* do_getpeername() Must return target values and target errnos. */
4022 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4023                                abi_ulong target_addrlen_addr)
4024 {
4025     socklen_t addrlen;
4026     void *addr;
4027     abi_long ret;
4028 
4029     if (get_user_u32(addrlen, target_addrlen_addr))
4030         return -TARGET_EFAULT;
4031 
4032     if ((int)addrlen < 0) {
4033         return -TARGET_EINVAL;
4034     }
4035 
4036     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4037         return -TARGET_EFAULT;
4038 
4039     addr = alloca(addrlen);
4040 
4041     ret = get_errno(getpeername(fd, addr, &addrlen));
4042     if (!is_error(ret)) {
4043         host_to_target_sockaddr(target_addr, addr, addrlen);
4044         if (put_user_u32(addrlen, target_addrlen_addr))
4045             ret = -TARGET_EFAULT;
4046     }
4047     return ret;
4048 }
4049 
4050 /* do_getsockname() Must return target values and target errnos. */
4051 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4052                                abi_ulong target_addrlen_addr)
4053 {
4054     socklen_t addrlen;
4055     void *addr;
4056     abi_long ret;
4057 
4058     if (get_user_u32(addrlen, target_addrlen_addr))
4059         return -TARGET_EFAULT;
4060 
4061     if ((int)addrlen < 0) {
4062         return -TARGET_EINVAL;
4063     }
4064 
4065     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4066         return -TARGET_EFAULT;
4067 
4068     addr = alloca(addrlen);
4069 
4070     ret = get_errno(getsockname(fd, addr, &addrlen));
4071     if (!is_error(ret)) {
4072         host_to_target_sockaddr(target_addr, addr, addrlen);
4073         if (put_user_u32(addrlen, target_addrlen_addr))
4074             ret = -TARGET_EFAULT;
4075     }
4076     return ret;
4077 }
4078 
4079 /* do_socketpair() Must return target values and target errnos. */
4080 static abi_long do_socketpair(int domain, int type, int protocol,
4081                               abi_ulong target_tab_addr)
4082 {
4083     int tab[2];
4084     abi_long ret;
4085 
4086     target_to_host_sock_type(&type);
4087 
4088     ret = get_errno(socketpair(domain, type, protocol, tab));
4089     if (!is_error(ret)) {
4090         if (put_user_s32(tab[0], target_tab_addr)
4091             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4092             ret = -TARGET_EFAULT;
4093     }
4094     return ret;
4095 }
4096 
4097 /* do_sendto() Must return target values and target errnos. */
4098 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4099                           abi_ulong target_addr, socklen_t addrlen)
4100 {
4101     void *addr;
4102     void *host_msg;
4103     void *copy_msg = NULL;
4104     abi_long ret;
4105 
4106     if ((int)addrlen < 0) {
4107         return -TARGET_EINVAL;
4108     }
4109 
4110     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4111     if (!host_msg)
4112         return -TARGET_EFAULT;
4113     if (fd_trans_target_to_host_data(fd)) {
4114         copy_msg = host_msg;
4115         host_msg = g_malloc(len);
4116         memcpy(host_msg, copy_msg, len);
4117         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4118         if (ret < 0) {
4119             goto fail;
4120         }
4121     }
4122     if (target_addr) {
4123         addr = alloca(addrlen+1);
4124         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4125         if (ret) {
4126             goto fail;
4127         }
4128         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4129     } else {
4130         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4131     }
4132 fail:
4133     if (copy_msg) {
4134         g_free(host_msg);
4135         host_msg = copy_msg;
4136     }
4137     unlock_user(host_msg, msg, 0);
4138     return ret;
4139 }
4140 
4141 /* do_recvfrom() Must return target values and target errnos. */
4142 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4143                             abi_ulong target_addr,
4144                             abi_ulong target_addrlen)
4145 {
4146     socklen_t addrlen;
4147     void *addr;
4148     void *host_msg;
4149     abi_long ret;
4150 
4151     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4152     if (!host_msg)
4153         return -TARGET_EFAULT;
4154     if (target_addr) {
4155         if (get_user_u32(addrlen, target_addrlen)) {
4156             ret = -TARGET_EFAULT;
4157             goto fail;
4158         }
4159         if ((int)addrlen < 0) {
4160             ret = -TARGET_EINVAL;
4161             goto fail;
4162         }
4163         addr = alloca(addrlen);
4164         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4165                                       addr, &addrlen));
4166     } else {
4167         addr = NULL; /* To keep compiler quiet.  */
4168         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4169     }
4170     if (!is_error(ret)) {
4171         if (fd_trans_host_to_target_data(fd)) {
4172             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4173         }
4174         if (target_addr) {
4175             host_to_target_sockaddr(target_addr, addr, addrlen);
4176             if (put_user_u32(addrlen, target_addrlen)) {
4177                 ret = -TARGET_EFAULT;
4178                 goto fail;
4179             }
4180         }
4181         unlock_user(host_msg, msg, len);
4182     } else {
4183 fail:
4184         unlock_user(host_msg, msg, 0);
4185     }
4186     return ret;
4187 }
4188 
4189 #ifdef TARGET_NR_socketcall
4190 /* do_socketcall() must return target values and target errnos. */
4191 static abi_long do_socketcall(int num, abi_ulong vptr)
4192 {
4193     static const unsigned nargs[] = { /* number of arguments per operation */
4194         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4195         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4196         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4197         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4198         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4199         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4200         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4201         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4202         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4203         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4204         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4205         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4206         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4207         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4208         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4209         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4210         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4211         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4212         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4213         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4214     };
4215     abi_long a[6]; /* max 6 args */
4216     unsigned i;
4217 
4218     /* check the range of the first argument num */
4219     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4220     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4221         return -TARGET_EINVAL;
4222     }
4223     /* ensure we have space for args */
4224     if (nargs[num] > ARRAY_SIZE(a)) {
4225         return -TARGET_EINVAL;
4226     }
4227     /* collect the arguments in a[] according to nargs[] */
4228     for (i = 0; i < nargs[num]; ++i) {
4229         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4230             return -TARGET_EFAULT;
4231         }
4232     }
4233     /* now that we have the args, invoke the appropriate underlying function */
4234     switch (num) {
4235     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4236         return do_socket(a[0], a[1], a[2]);
4237     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4238         return do_bind(a[0], a[1], a[2]);
4239     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4240         return do_connect(a[0], a[1], a[2]);
4241     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4242         return get_errno(listen(a[0], a[1]));
4243     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4244         return do_accept4(a[0], a[1], a[2], 0);
4245     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4246         return do_getsockname(a[0], a[1], a[2]);
4247     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4248         return do_getpeername(a[0], a[1], a[2]);
4249     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4250         return do_socketpair(a[0], a[1], a[2], a[3]);
4251     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4252         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4253     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4254         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4255     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4256         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4257     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4258         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4259     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4260         return get_errno(shutdown(a[0], a[1]));
4261     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4262         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4263     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4264         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4265     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4266         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4267     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4268         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4269     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4270         return do_accept4(a[0], a[1], a[2], a[3]);
4271     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4272         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4273     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4274         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4275     default:
4276         gemu_log("Unsupported socketcall: %d\n", num);
4277         return -TARGET_EINVAL;
4278     }
4279 }
4280 #endif
4281 
4282 #define N_SHM_REGIONS	32
4283 
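/* Book-keeping for shmat()ed segments: the guest start address and size of
 * each attached region are recorded so that a later shmdt() can find and
 * unmap the whole mapping.
 */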
4284 static struct shm_region {
4285     abi_ulong start;
4286     abi_ulong size;
4287     bool in_use;
4288 } shm_regions[N_SHM_REGIONS];
4289 
4290 #ifndef TARGET_SEMID64_DS
4291 /* asm-generic version of this struct */
4292 struct target_semid64_ds
4293 {
4294   struct target_ipc_perm sem_perm;
4295   abi_ulong sem_otime;
4296 #if TARGET_ABI_BITS == 32
4297   abi_ulong __unused1;
4298 #endif
4299   abi_ulong sem_ctime;
4300 #if TARGET_ABI_BITS == 32
4301   abi_ulong __unused2;
4302 #endif
4303   abi_ulong sem_nsems;
4304   abi_ulong __unused3;
4305   abi_ulong __unused4;
4306 };
4307 #endif
4308 
4309 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4310                                                abi_ulong target_addr)
4311 {
4312     struct target_ipc_perm *target_ip;
4313     struct target_semid64_ds *target_sd;
4314 
4315     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4316         return -TARGET_EFAULT;
4317     target_ip = &(target_sd->sem_perm);
4318     host_ip->__key = tswap32(target_ip->__key);
4319     host_ip->uid = tswap32(target_ip->uid);
4320     host_ip->gid = tswap32(target_ip->gid);
4321     host_ip->cuid = tswap32(target_ip->cuid);
4322     host_ip->cgid = tswap32(target_ip->cgid);
4323 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4324     host_ip->mode = tswap32(target_ip->mode);
4325 #else
4326     host_ip->mode = tswap16(target_ip->mode);
4327 #endif
4328 #if defined(TARGET_PPC)
4329     host_ip->__seq = tswap32(target_ip->__seq);
4330 #else
4331     host_ip->__seq = tswap16(target_ip->__seq);
4332 #endif
4333     unlock_user_struct(target_sd, target_addr, 0);
4334     return 0;
4335 }
4336 
4337 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4338                                                struct ipc_perm *host_ip)
4339 {
4340     struct target_ipc_perm *target_ip;
4341     struct target_semid64_ds *target_sd;
4342 
4343     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4344         return -TARGET_EFAULT;
4345     target_ip = &(target_sd->sem_perm);
4346     target_ip->__key = tswap32(host_ip->__key);
4347     target_ip->uid = tswap32(host_ip->uid);
4348     target_ip->gid = tswap32(host_ip->gid);
4349     target_ip->cuid = tswap32(host_ip->cuid);
4350     target_ip->cgid = tswap32(host_ip->cgid);
4351 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4352     target_ip->mode = tswap32(host_ip->mode);
4353 #else
4354     target_ip->mode = tswap16(host_ip->mode);
4355 #endif
4356 #if defined(TARGET_PPC)
4357     target_ip->__seq = tswap32(host_ip->__seq);
4358 #else
4359     target_ip->__seq = tswap16(host_ip->__seq);
4360 #endif
4361     unlock_user_struct(target_sd, target_addr, 1);
4362     return 0;
4363 }
4364 
4365 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4366                                                abi_ulong target_addr)
4367 {
4368     struct target_semid64_ds *target_sd;
4369 
4370     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4371         return -TARGET_EFAULT;
4372     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4373         return -TARGET_EFAULT;
4374     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4375     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4376     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4377     unlock_user_struct(target_sd, target_addr, 0);
4378     return 0;
4379 }
4380 
4381 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4382                                                struct semid_ds *host_sd)
4383 {
4384     struct target_semid64_ds *target_sd;
4385 
4386     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4387         return -TARGET_EFAULT;
4388     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4389         return -TARGET_EFAULT;
4390     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4391     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4392     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4393     unlock_user_struct(target_sd, target_addr, 1);
4394     return 0;
4395 }
4396 
4397 struct target_seminfo {
4398     int semmap;
4399     int semmni;
4400     int semmns;
4401     int semmnu;
4402     int semmsl;
4403     int semopm;
4404     int semume;
4405     int semusz;
4406     int semvmx;
4407     int semaem;
4408 };
4409 
4410 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4411                                               struct seminfo *host_seminfo)
4412 {
4413     struct target_seminfo *target_seminfo;
4414     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4415         return -TARGET_EFAULT;
4416     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4417     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4418     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4419     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4420     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4421     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4422     __put_user(host_seminfo->semume, &target_seminfo->semume);
4423     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4424     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4425     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4426     unlock_user_struct(target_seminfo, target_addr, 1);
4427     return 0;
4428 }
4429 
4430 union semun {
4431 	int val;
4432 	struct semid_ds *buf;
4433 	unsigned short *array;
4434 	struct seminfo *__buf;
4435 };
4436 
4437 union target_semun {
4438 	int val;
4439 	abi_ulong buf;
4440 	abi_ulong array;
4441 	abi_ulong __buf;
4442 };
4443 
4444 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4445                                                abi_ulong target_addr)
4446 {
4447     int nsems;
4448     unsigned short *array;
4449     union semun semun;
4450     struct semid_ds semid_ds;
4451     int i, ret;
4452 
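    /* IPC_STAT the semaphore set first: sem_nsems tells us how many 16-bit
     * values the guest's array holds.
     */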
4453     semun.buf = &semid_ds;
4454 
4455     ret = semctl(semid, 0, IPC_STAT, semun);
4456     if (ret == -1)
4457         return get_errno(ret);
4458 
4459     nsems = semid_ds.sem_nsems;
4460 
4461     *host_array = g_try_new(unsigned short, nsems);
4462     if (!*host_array) {
4463         return -TARGET_ENOMEM;
4464     }
4465     array = lock_user(VERIFY_READ, target_addr,
4466                       nsems*sizeof(unsigned short), 1);
4467     if (!array) {
4468         g_free(*host_array);
4469         return -TARGET_EFAULT;
4470     }
4471 
4472     for(i=0; i<nsems; i++) {
4473         __get_user((*host_array)[i], &array[i]);
4474     }
4475     unlock_user(array, target_addr, 0);
4476 
4477     return 0;
4478 }
4479 
4480 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4481                                                unsigned short **host_array)
4482 {
4483     int nsems;
4484     unsigned short *array;
4485     union semun semun;
4486     struct semid_ds semid_ds;
4487     int i, ret;
4488 
4489     semun.buf = &semid_ds;
4490 
4491     ret = semctl(semid, 0, IPC_STAT, semun);
4492     if (ret == -1)
4493         return get_errno(ret);
4494 
4495     nsems = semid_ds.sem_nsems;
4496 
4497     array = lock_user(VERIFY_WRITE, target_addr,
4498                       nsems*sizeof(unsigned short), 0);
4499     if (!array)
4500         return -TARGET_EFAULT;
4501 
4502     for(i=0; i<nsems; i++) {
4503         __put_user((*host_array)[i], &array[i]);
4504     }
4505     g_free(*host_array);
4506     unlock_user(array, target_addr, 1);
4507 
4508     return 0;
4509 }
4510 
4511 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4512                                  abi_ulong target_arg)
4513 {
4514     union target_semun target_su = { .buf = target_arg };
4515     union semun arg;
4516     struct semid_ds dsarg;
4517     unsigned short *array = NULL;
4518     struct seminfo seminfo;
4519     abi_long ret = -TARGET_EINVAL;
4520     abi_long err;
4521     cmd &= 0xff;
4522 
4523     switch( cmd ) {
4524 	case GETVAL:
4525 	case SETVAL:
4526             /* In 64 bit cross-endian situations, we will erroneously pick up
4527              * the wrong half of the union for the "val" element.  To rectify
4528              * this, the entire 8-byte structure is byteswapped, followed by
4529              * a swap of the 4 byte val field. In other cases, the data is
4530              * already in proper host byte order. */
4531 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4532 		target_su.buf = tswapal(target_su.buf);
4533 		arg.val = tswap32(target_su.val);
4534 	    } else {
4535 		arg.val = target_su.val;
4536 	    }
4537             ret = get_errno(semctl(semid, semnum, cmd, arg));
4538             break;
4539 	case GETALL:
4540 	case SETALL:
4541             err = target_to_host_semarray(semid, &array, target_su.array);
4542             if (err)
4543                 return err;
4544             arg.array = array;
4545             ret = get_errno(semctl(semid, semnum, cmd, arg));
4546             err = host_to_target_semarray(semid, target_su.array, &array);
4547             if (err)
4548                 return err;
4549             break;
4550 	case IPC_STAT:
4551 	case IPC_SET:
4552 	case SEM_STAT:
4553             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4554             if (err)
4555                 return err;
4556             arg.buf = &dsarg;
4557             ret = get_errno(semctl(semid, semnum, cmd, arg));
4558             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4559             if (err)
4560                 return err;
4561             break;
4562 	case IPC_INFO:
4563 	case SEM_INFO:
4564             arg.__buf = &seminfo;
4565             ret = get_errno(semctl(semid, semnum, cmd, arg));
4566             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4567             if (err)
4568                 return err;
4569             break;
4570 	case IPC_RMID:
4571 	case GETPID:
4572 	case GETNCNT:
4573 	case GETZCNT:
4574             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4575             break;
4576     }
4577 
4578     return ret;
4579 }
4580 
4581 struct target_sembuf {
4582     unsigned short sem_num;
4583     short sem_op;
4584     short sem_flg;
4585 };
4586 
4587 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4588                                              abi_ulong target_addr,
4589                                              unsigned nsops)
4590 {
4591     struct target_sembuf *target_sembuf;
4592     int i;
4593 
4594     target_sembuf = lock_user(VERIFY_READ, target_addr,
4595                               nsops*sizeof(struct target_sembuf), 1);
4596     if (!target_sembuf)
4597         return -TARGET_EFAULT;
4598 
4599     for(i=0; i<nsops; i++) {
4600         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4601         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4602         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4603     }
4604 
4605     unlock_user(target_sembuf, target_addr, 0);
4606 
4607     return 0;
4608 }
4609 
4610 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4611 {
4612     struct sembuf sops[nsops];
4613 
4614     if (target_to_host_sembuf(sops, ptr, nsops))
4615         return -TARGET_EFAULT;
4616 
4617     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4618 }
4619 
4620 struct target_msqid_ds
4621 {
4622     struct target_ipc_perm msg_perm;
4623     abi_ulong msg_stime;
4624 #if TARGET_ABI_BITS == 32
4625     abi_ulong __unused1;
4626 #endif
4627     abi_ulong msg_rtime;
4628 #if TARGET_ABI_BITS == 32
4629     abi_ulong __unused2;
4630 #endif
4631     abi_ulong msg_ctime;
4632 #if TARGET_ABI_BITS == 32
4633     abi_ulong __unused3;
4634 #endif
4635     abi_ulong __msg_cbytes;
4636     abi_ulong msg_qnum;
4637     abi_ulong msg_qbytes;
4638     abi_ulong msg_lspid;
4639     abi_ulong msg_lrpid;
4640     abi_ulong __unused4;
4641     abi_ulong __unused5;
4642 };
4643 
4644 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4645                                                abi_ulong target_addr)
4646 {
4647     struct target_msqid_ds *target_md;
4648 
4649     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4650         return -TARGET_EFAULT;
4651     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4652         return -TARGET_EFAULT;
4653     host_md->msg_stime = tswapal(target_md->msg_stime);
4654     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4655     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4656     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4657     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4658     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4659     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4660     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4661     unlock_user_struct(target_md, target_addr, 0);
4662     return 0;
4663 }
4664 
4665 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4666                                                struct msqid_ds *host_md)
4667 {
4668     struct target_msqid_ds *target_md;
4669 
4670     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4671         return -TARGET_EFAULT;
4672     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4673         return -TARGET_EFAULT;
4674     target_md->msg_stime = tswapal(host_md->msg_stime);
4675     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4676     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4677     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4678     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4679     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4680     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4681     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4682     unlock_user_struct(target_md, target_addr, 1);
4683     return 0;
4684 }
4685 
4686 struct target_msginfo {
4687     int msgpool;
4688     int msgmap;
4689     int msgmax;
4690     int msgmnb;
4691     int msgmni;
4692     int msgssz;
4693     int msgtql;
4694     unsigned short int msgseg;
4695 };
4696 
4697 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4698                                               struct msginfo *host_msginfo)
4699 {
4700     struct target_msginfo *target_msginfo;
4701     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4702         return -TARGET_EFAULT;
4703     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4704     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4705     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4706     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4707     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4708     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4709     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4710     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4711     unlock_user_struct(target_msginfo, target_addr, 1);
4712     return 0;
4713 }
4714 
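/* Dispatch the msgctl(2) commands we know about, converting the
 * msqid_ds or msginfo argument between guest and host layouts as each
 * command requires.  Unrecognised commands fall through and return the
 * initial -TARGET_EINVAL.
 */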
4715 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4716 {
4717     struct msqid_ds dsarg;
4718     struct msginfo msginfo;
4719     abi_long ret = -TARGET_EINVAL;
4720 
4721     cmd &= 0xff;
4722 
4723     switch (cmd) {
4724     case IPC_STAT:
4725     case IPC_SET:
4726     case MSG_STAT:
4727         if (target_to_host_msqid_ds(&dsarg,ptr))
4728             return -TARGET_EFAULT;
4729         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4730         if (host_to_target_msqid_ds(ptr,&dsarg))
4731             return -TARGET_EFAULT;
4732         break;
4733     case IPC_RMID:
4734         ret = get_errno(msgctl(msgid, cmd, NULL));
4735         break;
4736     case IPC_INFO:
4737     case MSG_INFO:
4738         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4739         if (host_to_target_msginfo(ptr, &msginfo))
4740             return -TARGET_EFAULT;
4741         break;
4742     }
4743 
4744     return ret;
4745 }
4746 
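/* Guest view of struct msgbuf: an abi_long mtype followed by the
 * message text.  Because abi_long can differ from the host's long in
 * size and byte order, do_msgsnd()/do_msgrcv() below repack the
 * message through a temporary host buffer instead of passing the
 * guest buffer straight to the host syscall.
 */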
4747 struct target_msgbuf {
4748     abi_long mtype;
4749     char mtext[1];
4750 };
4751 
4752 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4753                                  ssize_t msgsz, int msgflg)
4754 {
4755     struct target_msgbuf *target_mb;
4756     struct msgbuf *host_mb;
4757     abi_long ret = 0;
4758 
4759     if (msgsz < 0) {
4760         return -TARGET_EINVAL;
4761     }
4762 
4763     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4764         return -TARGET_EFAULT;
4765     host_mb = g_try_malloc(msgsz + sizeof(long));
4766     if (!host_mb) {
4767         unlock_user_struct(target_mb, msgp, 0);
4768         return -TARGET_ENOMEM;
4769     }
4770     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4771     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4772     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4773     g_free(host_mb);
4774     unlock_user_struct(target_mb, msgp, 0);
4775 
4776     return ret;
4777 }
4778 
4779 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4780                                  ssize_t msgsz, abi_long msgtyp,
4781                                  int msgflg)
4782 {
4783     struct target_msgbuf *target_mb;
4784     char *target_mtext;
4785     struct msgbuf *host_mb;
4786     abi_long ret = 0;
4787 
4788     if (msgsz < 0) {
4789         return -TARGET_EINVAL;
4790     }
4791 
4792     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4793         return -TARGET_EFAULT;
4794 
4795     host_mb = g_try_malloc(msgsz + sizeof(long));
4796     if (!host_mb) {
4797         ret = -TARGET_ENOMEM;
4798         goto end;
4799     }
4800     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4801 
4802     if (ret > 0) {
4803         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4804         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4805         if (!target_mtext) {
4806             ret = -TARGET_EFAULT;
4807             goto end;
4808         }
4809         memcpy(target_mb->mtext, host_mb->mtext, ret);
4810         unlock_user(target_mtext, target_mtext_addr, ret);
4811     }
4812 
4813     target_mb->mtype = tswapal(host_mb->mtype);
4814 
4815 end:
4816     if (target_mb)
4817         unlock_user_struct(target_mb, msgp, 1);
4818     g_free(host_mb);
4819     return ret;
4820 }
4821 
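/* Converters for struct shmid_ds: each field is copied individually via
 * the __get_user()/__put_user() helpers so that field widths and byte
 * order are translated between the guest and host definitions.
 */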
4822 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4823                                                abi_ulong target_addr)
4824 {
4825     struct target_shmid_ds *target_sd;
4826 
4827     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4828         return -TARGET_EFAULT;
4829     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4830         return -TARGET_EFAULT;
4831     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4832     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4833     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4834     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4835     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4836     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4837     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4838     unlock_user_struct(target_sd, target_addr, 0);
4839     return 0;
4840 }
4841 
4842 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4843                                                struct shmid_ds *host_sd)
4844 {
4845     struct target_shmid_ds *target_sd;
4846 
4847     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4848         return -TARGET_EFAULT;
4849     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4850         return -TARGET_EFAULT;
4851     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4852     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4853     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4854     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4855     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4856     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4857     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4858     unlock_user_struct(target_sd, target_addr, 1);
4859     return 0;
4860 }
4861 
4862 struct  target_shminfo {
4863     abi_ulong shmmax;
4864     abi_ulong shmmin;
4865     abi_ulong shmmni;
4866     abi_ulong shmseg;
4867     abi_ulong shmall;
4868 };
4869 
4870 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4871                                               struct shminfo *host_shminfo)
4872 {
4873     struct target_shminfo *target_shminfo;
4874     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4875         return -TARGET_EFAULT;
4876     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4877     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4878     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4879     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4880     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4881     unlock_user_struct(target_shminfo, target_addr, 1);
4882     return 0;
4883 }
4884 
4885 struct target_shm_info {
4886     int used_ids;
4887     abi_ulong shm_tot;
4888     abi_ulong shm_rss;
4889     abi_ulong shm_swp;
4890     abi_ulong swap_attempts;
4891     abi_ulong swap_successes;
4892 };
4893 
4894 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4895                                                struct shm_info *host_shm_info)
4896 {
4897     struct target_shm_info *target_shm_info;
4898     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4899         return -TARGET_EFAULT;
4900     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4901     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4902     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4903     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4904     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4905     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4906     unlock_user_struct(target_shm_info, target_addr, 1);
4907     return 0;
4908 }
4909 
4910 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4911 {
4912     struct shmid_ds dsarg;
4913     struct shminfo shminfo;
4914     struct shm_info shm_info;
4915     abi_long ret = -TARGET_EINVAL;
4916 
4917     cmd &= 0xff;
4918 
4919     switch(cmd) {
4920     case IPC_STAT:
4921     case IPC_SET:
4922     case SHM_STAT:
4923         if (target_to_host_shmid_ds(&dsarg, buf))
4924             return -TARGET_EFAULT;
4925         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4926         if (host_to_target_shmid_ds(buf, &dsarg))
4927             return -TARGET_EFAULT;
4928         break;
4929     case IPC_INFO:
4930         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4931         if (host_to_target_shminfo(buf, &shminfo))
4932             return -TARGET_EFAULT;
4933         break;
4934     case SHM_INFO:
4935         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4936         if (host_to_target_shm_info(buf, &shm_info))
4937             return -TARGET_EFAULT;
4938         break;
4939     case IPC_RMID:
4940     case SHM_LOCK:
4941     case SHM_UNLOCK:
4942         ret = get_errno(shmctl(shmid, cmd, NULL));
4943         break;
4944     }
4945 
4946     return ret;
4947 }
4948 
4949 #ifndef TARGET_FORCE_SHMLBA
4950 /* For most architectures, SHMLBA is the same as the page size;
4951  * some architectures have larger values, in which case they should
4952  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4953  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4954  * and defining its own value for SHMLBA.
4955  *
4956  * The kernel also permits SHMLBA to be set by the architecture to a
4957  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4958  * this means that addresses are rounded to the large size if
4959  * SHM_RND is set but addresses not aligned to that size are not rejected
4960  * as long as they are at least page-aligned. Since the only architecture
4961  * which uses this is ia64 this code doesn't provide for that oddity.
4962  */
4963 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4964 {
4965     return TARGET_PAGE_SIZE;
4966 }
4967 #endif
4968 
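/* Attach a SysV shared memory segment on behalf of the guest: query the
 * segment size with IPC_STAT, round or reject a misaligned shmaddr
 * according to SHM_RND and the target SHMLBA, map the segment into the
 * guest address space (picking a free region with mmap_find_vma() when
 * no address was requested), and record the mapping in shm_regions[] so
 * that do_shmdt() can later clear the page flags.
 */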
4969 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4970                                  int shmid, abi_ulong shmaddr, int shmflg)
4971 {
4972     abi_long raddr;
4973     void *host_raddr;
4974     struct shmid_ds shm_info;
4975     int i,ret;
4976     abi_ulong shmlba;
4977 
4978     /* find out the length of the shared memory segment */
4979     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4980     if (is_error(ret)) {
4981         /* can't get length, bail out */
4982         return ret;
4983     }
4984 
4985     shmlba = target_shmlba(cpu_env);
4986 
4987     if (shmaddr & (shmlba - 1)) {
4988         if (shmflg & SHM_RND) {
4989             shmaddr &= ~(shmlba - 1);
4990         } else {
4991             return -TARGET_EINVAL;
4992         }
4993     }
4994     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4995         return -TARGET_EINVAL;
4996     }
4997 
4998     mmap_lock();
4999 
5000     if (shmaddr)
5001         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5002     else {
5003         abi_ulong mmap_start;
5004 
5005         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5006 
5007         if (mmap_start == -1) {
5008             errno = ENOMEM;
5009             host_raddr = (void *)-1;
5010         } else
5011             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5012     }
5013 
5014     if (host_raddr == (void *)-1) {
5015         mmap_unlock();
5016         return get_errno((long)host_raddr);
5017     }
5018     raddr = h2g((unsigned long)host_raddr);
5019 
5020     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5021                    PAGE_VALID | PAGE_READ |
5022                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5023 
5024     for (i = 0; i < N_SHM_REGIONS; i++) {
5025         if (!shm_regions[i].in_use) {
5026             shm_regions[i].in_use = true;
5027             shm_regions[i].start = raddr;
5028             shm_regions[i].size = shm_info.shm_segsz;
5029             break;
5030         }
5031     }
5032 
5033     mmap_unlock();
5034     return raddr;
5035 
5036 }
5037 
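/* Detach a segment previously attached by do_shmat(): drop the page
 * flags for the recorded region, then call the host shmdt().
 */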
5038 static inline abi_long do_shmdt(abi_ulong shmaddr)
5039 {
5040     int i;
5041     abi_long rv;
5042 
5043     mmap_lock();
5044 
5045     for (i = 0; i < N_SHM_REGIONS; ++i) {
5046         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5047             shm_regions[i].in_use = false;
5048             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5049             break;
5050         }
5051     }
5052     rv = get_errno(shmdt(g2h(shmaddr)));
5053 
5054     mmap_unlock();
5055 
5056     return rv;
5057 }
5058 
5059 #ifdef TARGET_NR_ipc
5060 /* ??? This only works with linear mappings.  */
5061 /* do_ipc() must return target values and target errnos. */
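/* The guest's single ipc(2) syscall multiplexes all SysV IPC
 * operations: the low 16 bits of 'call' select the operation and the
 * high 16 bits carry an ABI version.  As an illustrative example (the
 * exact marshalling is up to the guest libc), a guest semop(semid,
 * sops, nsops) on such a target typically arrives here as
 * ipc(IPCOP_semop, semid, nsops, 0, sops, 0).
 */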
5062 static abi_long do_ipc(CPUArchState *cpu_env,
5063                        unsigned int call, abi_long first,
5064                        abi_long second, abi_long third,
5065                        abi_long ptr, abi_long fifth)
5066 {
5067     int version;
5068     abi_long ret = 0;
5069 
5070     version = call >> 16;
5071     call &= 0xffff;
5072 
5073     switch (call) {
5074     case IPCOP_semop:
5075         ret = do_semop(first, ptr, second);
5076         break;
5077 
5078     case IPCOP_semget:
5079         ret = get_errno(semget(first, second, third));
5080         break;
5081 
5082     case IPCOP_semctl: {
5083         /* The semun argument to semctl is passed by value, so dereference the
5084          * ptr argument. */
5085         abi_ulong atptr;
5086         get_user_ual(atptr, ptr);
5087         ret = do_semctl(first, second, third, atptr);
5088         break;
5089     }
5090 
5091     case IPCOP_msgget:
5092         ret = get_errno(msgget(first, second));
5093         break;
5094 
5095     case IPCOP_msgsnd:
5096         ret = do_msgsnd(first, ptr, second, third);
5097         break;
5098 
5099     case IPCOP_msgctl:
5100         ret = do_msgctl(first, second, ptr);
5101         break;
5102 
5103     case IPCOP_msgrcv:
5104         switch (version) {
5105         case 0:
5106             {
5107                 struct target_ipc_kludge {
5108                     abi_long msgp;
5109                     abi_long msgtyp;
5110                 } *tmp;
5111 
5112                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5113                     ret = -TARGET_EFAULT;
5114                     break;
5115                 }
5116 
5117                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5118 
5119                 unlock_user_struct(tmp, ptr, 0);
5120                 break;
5121             }
5122         default:
5123             ret = do_msgrcv(first, ptr, second, fifth, third);
5124         }
5125         break;
5126 
5127     case IPCOP_shmat:
5128         switch (version) {
5129         default:
5130         {
5131             abi_ulong raddr;
5132             raddr = do_shmat(cpu_env, first, ptr, second);
5133             if (is_error(raddr))
5134                 return get_errno(raddr);
5135             if (put_user_ual(raddr, third))
5136                 return -TARGET_EFAULT;
5137             break;
5138         }
5139         case 1:
5140             ret = -TARGET_EINVAL;
5141             break;
5142         }
5143         break;
5144     case IPCOP_shmdt:
5145         ret = do_shmdt(ptr);
5146         break;
5147 
5148     case IPCOP_shmget:
5149         /* IPC_* flag values are the same on all Linux platforms */
5150         ret = get_errno(shmget(first, second, third));
5151         break;
5152 
5153     /* IPC_* and SHM_* command values are the same on all Linux platforms */
5154     case IPCOP_shmctl:
5155         ret = do_shmctl(first, second, ptr);
5156         break;
5157     default:
5158         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5159         ret = -TARGET_ENOSYS;
5160         break;
5161     }
5162     return ret;
5163 }
5164 #endif
5165 
5166 /* kernel structure types definitions */
5167 
5168 #define STRUCT(name, ...) STRUCT_ ## name,
5169 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5170 enum {
5171 #include "syscall_types.h"
5172 STRUCT_MAX
5173 };
5174 #undef STRUCT
5175 #undef STRUCT_SPECIAL
5176 
5177 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5178 #define STRUCT_SPECIAL(name)
5179 #include "syscall_types.h"
5180 #undef STRUCT
5181 #undef STRUCT_SPECIAL
5182 
5183 typedef struct IOCTLEntry IOCTLEntry;
5184 
5185 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5186                              int fd, int cmd, abi_long arg);
5187 
5188 struct IOCTLEntry {
5189     int target_cmd;
5190     unsigned int host_cmd;
5191     const char *name;
5192     int access;
5193     do_ioctl_fn *do_ioctl;
5194     const argtype arg_type[5];
5195 };
5196 
5197 #define IOC_R 0x0001
5198 #define IOC_W 0x0002
5199 #define IOC_RW (IOC_R | IOC_W)
5200 
5201 #define MAX_STRUCT_SIZE 4096
5202 
5203 #ifdef CONFIG_FIEMAP
5204 /* So fiemap access checks don't overflow on 32 bit systems.
5205  * This is very slightly smaller than the limit imposed by
5206  * the underlying kernel.
5207  */
5208 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5209                             / sizeof(struct fiemap_extent))
5210 
5211 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5212                                        int fd, int cmd, abi_long arg)
5213 {
5214     /* The parameter for this ioctl is a struct fiemap followed
5215      * by an array of struct fiemap_extent whose size is set
5216      * in fiemap->fm_extent_count. The array is filled in by the
5217      * ioctl.
5218      */
5219     int target_size_in, target_size_out;
5220     struct fiemap *fm;
5221     const argtype *arg_type = ie->arg_type;
5222     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5223     void *argptr, *p;
5224     abi_long ret;
5225     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5226     uint32_t outbufsz;
5227     int free_fm = 0;
5228 
5229     assert(arg_type[0] == TYPE_PTR);
5230     assert(ie->access == IOC_RW);
5231     arg_type++;
5232     target_size_in = thunk_type_size(arg_type, 0);
5233     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5234     if (!argptr) {
5235         return -TARGET_EFAULT;
5236     }
5237     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5238     unlock_user(argptr, arg, 0);
5239     fm = (struct fiemap *)buf_temp;
5240     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5241         return -TARGET_EINVAL;
5242     }
5243 
5244     outbufsz = sizeof (*fm) +
5245         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5246 
5247     if (outbufsz > MAX_STRUCT_SIZE) {
5248         /* We can't fit all the extents into the fixed size buffer.
5249          * Allocate one that is large enough and use it instead.
5250          */
5251         fm = g_try_malloc(outbufsz);
5252         if (!fm) {
5253             return -TARGET_ENOMEM;
5254         }
5255         memcpy(fm, buf_temp, sizeof(struct fiemap));
5256         free_fm = 1;
5257     }
5258     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5259     if (!is_error(ret)) {
5260         target_size_out = target_size_in;
5261         /* An extent_count of 0 means we were only counting the extents
5262          * so there are no structs to copy
5263          */
5264         if (fm->fm_extent_count != 0) {
5265             target_size_out += fm->fm_mapped_extents * extent_size;
5266         }
5267         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5268         if (!argptr) {
5269             ret = -TARGET_EFAULT;
5270         } else {
5271             /* Convert the struct fiemap */
5272             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5273             if (fm->fm_extent_count != 0) {
5274                 p = argptr + target_size_in;
5275                 /* ...and then all the struct fiemap_extents */
5276                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5277                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5278                                   THUNK_TARGET);
5279                     p += extent_size;
5280                 }
5281             }
5282             unlock_user(argptr, arg, target_size_out);
5283         }
5284     }
5285     if (free_fm) {
5286         g_free(fm);
5287     }
5288     return ret;
5289 }
5290 #endif
5291 
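/* SIOCGIFCONF: the target's struct ifreq can differ in size from the
 * host's, so compute a host buffer large enough for the same number of
 * entries, run the ioctl, then convert the returned ifc_len and each
 * ifreq back to the target layout.
 */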
5292 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5293                                 int fd, int cmd, abi_long arg)
5294 {
5295     const argtype *arg_type = ie->arg_type;
5296     int target_size;
5297     void *argptr;
5298     int ret;
5299     struct ifconf *host_ifconf;
5300     uint32_t outbufsz;
5301     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5302     int target_ifreq_size;
5303     int nb_ifreq;
5304     int free_buf = 0;
5305     int i;
5306     int target_ifc_len;
5307     abi_long target_ifc_buf;
5308     int host_ifc_len;
5309     char *host_ifc_buf;
5310 
5311     assert(arg_type[0] == TYPE_PTR);
5312     assert(ie->access == IOC_RW);
5313 
5314     arg_type++;
5315     target_size = thunk_type_size(arg_type, 0);
5316 
5317     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5318     if (!argptr)
5319         return -TARGET_EFAULT;
5320     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5321     unlock_user(argptr, arg, 0);
5322 
5323     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5324     target_ifc_len = host_ifconf->ifc_len;
5325     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5326 
5327     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5328     nb_ifreq = target_ifc_len / target_ifreq_size;
5329     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5330 
5331     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5332     if (outbufsz > MAX_STRUCT_SIZE) {
5333         /* We can't fit all the ifreq entries into the fixed size buffer.
5334          * Allocate one that is large enough and use it instead.
5335          */
5336         host_ifconf = malloc(outbufsz);
5337         if (!host_ifconf) {
5338             return -TARGET_ENOMEM;
5339         }
5340         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5341         free_buf = 1;
5342     }
5343     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5344 
5345     host_ifconf->ifc_len = host_ifc_len;
5346     host_ifconf->ifc_buf = host_ifc_buf;
5347 
5348     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5349     if (!is_error(ret)) {
5350         /* convert host ifc_len to target ifc_len */
5351 
5352         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5353         target_ifc_len = nb_ifreq * target_ifreq_size;
5354         host_ifconf->ifc_len = target_ifc_len;
5355 
5356         /* restore target ifc_buf */
5357 
5358         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5359 
5360         /* copy struct ifconf to target user */
5361 
5362         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5363         if (!argptr)
5364             return -TARGET_EFAULT;
5365         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5366         unlock_user(argptr, arg, target_size);
5367 
5368         /* copy ifreq[] to target user */
5369 
5370         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5371         for (i = 0; i < nb_ifreq ; i++) {
5372             thunk_convert(argptr + i * target_ifreq_size,
5373                           host_ifc_buf + i * sizeof(struct ifreq),
5374                           ifreq_arg_type, THUNK_TARGET);
5375         }
5376         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5377     }
5378 
5379     if (free_buf) {
5380         free(host_ifconf);
5381     }
5382 
5383     return ret;
5384 }
5385 
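/* Device-mapper ioctls carry a variable-length payload after the
 * struct dm_ioctl header, described by its data_start/data_size
 * fields.  Convert the header with the generic thunk machinery and
 * then hand-convert the command-specific payload in both directions.
 */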
5386 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5387                             int cmd, abi_long arg)
5388 {
5389     void *argptr;
5390     struct dm_ioctl *host_dm;
5391     abi_long guest_data;
5392     uint32_t guest_data_size;
5393     int target_size;
5394     const argtype *arg_type = ie->arg_type;
5395     abi_long ret;
5396     void *big_buf = NULL;
5397     char *host_data;
5398 
5399     arg_type++;
5400     target_size = thunk_type_size(arg_type, 0);
5401     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402     if (!argptr) {
5403         ret = -TARGET_EFAULT;
5404         goto out;
5405     }
5406     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5407     unlock_user(argptr, arg, 0);
5408 
5409     /* buf_temp is too small, so fetch things into a bigger buffer */
5410     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5411     memcpy(big_buf, buf_temp, target_size);
5412     buf_temp = big_buf;
5413     host_dm = big_buf;
5414 
5415     guest_data = arg + host_dm->data_start;
5416     if ((guest_data - arg) < 0) {
5417         ret = -TARGET_EINVAL;
5418         goto out;
5419     }
5420     guest_data_size = host_dm->data_size - host_dm->data_start;
5421     host_data = (char*)host_dm + host_dm->data_start;
5422 
5423     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5424     if (!argptr) {
5425         ret = -TARGET_EFAULT;
5426         goto out;
5427     }
5428 
5429     switch (ie->host_cmd) {
5430     case DM_REMOVE_ALL:
5431     case DM_LIST_DEVICES:
5432     case DM_DEV_CREATE:
5433     case DM_DEV_REMOVE:
5434     case DM_DEV_SUSPEND:
5435     case DM_DEV_STATUS:
5436     case DM_DEV_WAIT:
5437     case DM_TABLE_STATUS:
5438     case DM_TABLE_CLEAR:
5439     case DM_TABLE_DEPS:
5440     case DM_LIST_VERSIONS:
5441         /* no input data */
5442         break;
5443     case DM_DEV_RENAME:
5444     case DM_DEV_SET_GEOMETRY:
5445         /* data contains only strings */
5446         memcpy(host_data, argptr, guest_data_size);
5447         break;
5448     case DM_TARGET_MSG:
5449         memcpy(host_data, argptr, guest_data_size);
5450         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5451         break;
5452     case DM_TABLE_LOAD:
5453     {
5454         void *gspec = argptr;
5455         void *cur_data = host_data;
5456         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5457         int spec_size = thunk_type_size(arg_type, 0);
5458         int i;
5459 
5460         for (i = 0; i < host_dm->target_count; i++) {
5461             struct dm_target_spec *spec = cur_data;
5462             uint32_t next;
5463             int slen;
5464 
5465             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5466             slen = strlen((char*)gspec + spec_size) + 1;
5467             next = spec->next;
5468             spec->next = sizeof(*spec) + slen;
5469             strcpy((char*)&spec[1], gspec + spec_size);
5470             gspec += next;
5471             cur_data += spec->next;
5472         }
5473         break;
5474     }
5475     default:
5476         ret = -TARGET_EINVAL;
5477         unlock_user(argptr, guest_data, 0);
5478         goto out;
5479     }
5480     unlock_user(argptr, guest_data, 0);
5481 
5482     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5483     if (!is_error(ret)) {
5484         guest_data = arg + host_dm->data_start;
5485         guest_data_size = host_dm->data_size - host_dm->data_start;
5486         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5487         switch (ie->host_cmd) {
5488         case DM_REMOVE_ALL:
5489         case DM_DEV_CREATE:
5490         case DM_DEV_REMOVE:
5491         case DM_DEV_RENAME:
5492         case DM_DEV_SUSPEND:
5493         case DM_DEV_STATUS:
5494         case DM_TABLE_LOAD:
5495         case DM_TABLE_CLEAR:
5496         case DM_TARGET_MSG:
5497         case DM_DEV_SET_GEOMETRY:
5498             /* no return data */
5499             break;
5500         case DM_LIST_DEVICES:
5501         {
5502             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5503             uint32_t remaining_data = guest_data_size;
5504             void *cur_data = argptr;
5505             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5506             int nl_size = 12; /* can't use thunk_size due to alignment */
5507 
5508             while (1) {
5509                 uint32_t next = nl->next;
5510                 if (next) {
5511                     nl->next = nl_size + (strlen(nl->name) + 1);
5512                 }
5513                 if (remaining_data < nl->next) {
5514                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5515                     break;
5516                 }
5517                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5518                 strcpy(cur_data + nl_size, nl->name);
5519                 cur_data += nl->next;
5520                 remaining_data -= nl->next;
5521                 if (!next) {
5522                     break;
5523                 }
5524                 nl = (void*)nl + next;
5525             }
5526             break;
5527         }
5528         case DM_DEV_WAIT:
5529         case DM_TABLE_STATUS:
5530         {
5531             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5532             void *cur_data = argptr;
5533             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5534             int spec_size = thunk_type_size(arg_type, 0);
5535             int i;
5536 
5537             for (i = 0; i < host_dm->target_count; i++) {
5538                 uint32_t next = spec->next;
5539                 int slen = strlen((char*)&spec[1]) + 1;
5540                 spec->next = (cur_data - argptr) + spec_size + slen;
5541                 if (guest_data_size < spec->next) {
5542                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5543                     break;
5544                 }
5545                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5546                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5547                 cur_data = argptr + spec->next;
5548                 spec = (void*)host_dm + host_dm->data_start + next;
5549             }
5550             break;
5551         }
5552         case DM_TABLE_DEPS:
5553         {
5554             void *hdata = (void*)host_dm + host_dm->data_start;
5555             int count = *(uint32_t*)hdata;
5556             uint64_t *hdev = hdata + 8;
5557             uint64_t *gdev = argptr + 8;
5558             int i;
5559 
5560             *(uint32_t*)argptr = tswap32(count);
5561             for (i = 0; i < count; i++) {
5562                 *gdev = tswap64(*hdev);
5563                 gdev++;
5564                 hdev++;
5565             }
5566             break;
5567         }
5568         case DM_LIST_VERSIONS:
5569         {
5570             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5571             uint32_t remaining_data = guest_data_size;
5572             void *cur_data = argptr;
5573             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5574             int vers_size = thunk_type_size(arg_type, 0);
5575 
5576             while (1) {
5577                 uint32_t next = vers->next;
5578                 if (next) {
5579                     vers->next = vers_size + (strlen(vers->name) + 1);
5580                 }
5581                 if (remaining_data < vers->next) {
5582                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5583                     break;
5584                 }
5585                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5586                 strcpy(cur_data + vers_size, vers->name);
5587                 cur_data += vers->next;
5588                 remaining_data -= vers->next;
5589                 if (!next) {
5590                     break;
5591                 }
5592                 vers = (void*)vers + next;
5593             }
5594             break;
5595         }
5596         default:
5597             unlock_user(argptr, guest_data, 0);
5598             ret = -TARGET_EINVAL;
5599             goto out;
5600         }
5601         unlock_user(argptr, guest_data, guest_data_size);
5602 
5603         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5604         if (!argptr) {
5605             ret = -TARGET_EFAULT;
5606             goto out;
5607         }
5608         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5609         unlock_user(argptr, arg, target_size);
5610     }
5611 out:
5612     g_free(big_buf);
5613     return ret;
5614 }
5615 
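/* BLKPG: the struct blkpg_ioctl_arg passed by the guest embeds a
 * pointer to a struct blkpg_partition, so convert the outer struct,
 * fetch and convert the partition data it points to, and repoint the
 * host copy at our local buffer before issuing the ioctl.
 */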
5616 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5617                                int cmd, abi_long arg)
5618 {
5619     void *argptr;
5620     int target_size;
5621     const argtype *arg_type = ie->arg_type;
5622     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5623     abi_long ret;
5624 
5625     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5626     struct blkpg_partition host_part;
5627 
5628     /* Read and convert blkpg */
5629     arg_type++;
5630     target_size = thunk_type_size(arg_type, 0);
5631     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5632     if (!argptr) {
5633         ret = -TARGET_EFAULT;
5634         goto out;
5635     }
5636     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5637     unlock_user(argptr, arg, 0);
5638 
5639     switch (host_blkpg->op) {
5640     case BLKPG_ADD_PARTITION:
5641     case BLKPG_DEL_PARTITION:
5642         /* payload is struct blkpg_partition */
5643         break;
5644     default:
5645         /* Unknown opcode */
5646         ret = -TARGET_EINVAL;
5647         goto out;
5648     }
5649 
5650     /* Read and convert blkpg->data */
5651     arg = (abi_long)(uintptr_t)host_blkpg->data;
5652     target_size = thunk_type_size(part_arg_type, 0);
5653     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5654     if (!argptr) {
5655         ret = -TARGET_EFAULT;
5656         goto out;
5657     }
5658     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5659     unlock_user(argptr, arg, 0);
5660 
5661     /* Swizzle the data pointer to our local copy and call! */
5662     host_blkpg->data = &host_part;
5663     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5664 
5665 out:
5666     return ret;
5667 }
5668 
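/* Route ioctls (e.g. SIOCADDRT): struct rtentry contains a char *rt_dev
 * device-name pointer that the generic thunk code cannot follow, so
 * convert the structure field by field and lock the guest string for
 * the rt_dev field ourselves.
 */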
5669 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5670                                 int fd, int cmd, abi_long arg)
5671 {
5672     const argtype *arg_type = ie->arg_type;
5673     const StructEntry *se;
5674     const argtype *field_types;
5675     const int *dst_offsets, *src_offsets;
5676     int target_size;
5677     void *argptr;
5678     abi_ulong *target_rt_dev_ptr = NULL;
5679     unsigned long *host_rt_dev_ptr = NULL; /* stays NULL if no rt_dev field is seen */
5680     abi_long ret;
5681     int i;
5682 
5683     assert(ie->access == IOC_W);
5684     assert(*arg_type == TYPE_PTR);
5685     arg_type++;
5686     assert(*arg_type == TYPE_STRUCT);
5687     target_size = thunk_type_size(arg_type, 0);
5688     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5689     if (!argptr) {
5690         return -TARGET_EFAULT;
5691     }
5692     arg_type++;
5693     assert(*arg_type == (int)STRUCT_rtentry);
5694     se = struct_entries + *arg_type++;
5695     assert(se->convert[0] == NULL);
5696     /* convert struct here to be able to catch rt_dev string */
5697     field_types = se->field_types;
5698     dst_offsets = se->field_offsets[THUNK_HOST];
5699     src_offsets = se->field_offsets[THUNK_TARGET];
5700     for (i = 0; i < se->nb_fields; i++) {
5701         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5702             assert(*field_types == TYPE_PTRVOID);
5703             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5704             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5705             if (*target_rt_dev_ptr != 0) {
5706                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5707                                                   tswapal(*target_rt_dev_ptr));
5708                 if (!*host_rt_dev_ptr) {
5709                     unlock_user(argptr, arg, 0);
5710                     return -TARGET_EFAULT;
5711                 }
5712             } else {
5713                 *host_rt_dev_ptr = 0;
5714             }
5715             field_types++;
5716             continue;
5717         }
5718         field_types = thunk_convert(buf_temp + dst_offsets[i],
5719                                     argptr + src_offsets[i],
5720                                     field_types, THUNK_HOST);
5721     }
5722     unlock_user(argptr, arg, 0);
5723 
5724     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5725     if (host_rt_dev_ptr && *host_rt_dev_ptr != 0) {
5726         unlock_user((void *)*host_rt_dev_ptr,
5727                     *target_rt_dev_ptr, 0);
5728     }
5729     return ret;
5730 }
5731 
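/* KDSIGACCEPT passes a signal number by value, so translate it from the
 * target numbering to the host numbering before issuing the ioctl.
 */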
5732 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5733                                      int fd, int cmd, abi_long arg)
5734 {
5735     int sig = target_to_host_signal(arg);
5736     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5737 }
5738 
5739 #ifdef TIOCGPTPEER
5740 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5741                                      int fd, int cmd, abi_long arg)
5742 {
5743     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5744     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5745 }
5746 #endif
5747 
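/* ioctls.h expands to one table entry per supported ioctl.  As an
 * illustrative (not verbatim) example, an entry such as
 *     IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
 * would register a read-only ioctl whose argument is a pointer to a
 * 64-bit value, handled by the generic TYPE_PTR path in do_ioctl();
 * IOCTL_SPECIAL() entries name one of the do_ioctl_* helpers above,
 * and IOCTL_IGNORE() entries are matched but return -TARGET_ENOSYS
 * instead of being forwarded to the host.
 */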
5748 static IOCTLEntry ioctl_entries[] = {
5749 #define IOCTL(cmd, access, ...) \
5750     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5751 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5752     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5753 #define IOCTL_IGNORE(cmd) \
5754     { TARGET_ ## cmd, 0, #cmd },
5755 #include "ioctls.h"
5756     { 0, 0, },
5757 };
5758 
5759 /* ??? Implement proper locking for ioctls.  */
5760 /* do_ioctl() must return target values and target errnos. */
5761 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5762 {
5763     const IOCTLEntry *ie;
5764     const argtype *arg_type;
5765     abi_long ret;
5766     uint8_t buf_temp[MAX_STRUCT_SIZE];
5767     int target_size;
5768     void *argptr;
5769 
5770     ie = ioctl_entries;
5771     for(;;) {
5772         if (ie->target_cmd == 0) {
5773             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5774             return -TARGET_ENOSYS;
5775         }
5776         if (ie->target_cmd == cmd)
5777             break;
5778         ie++;
5779     }
5780     arg_type = ie->arg_type;
5781 #if defined(DEBUG)
5782     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5783 #endif
5784     if (ie->do_ioctl) {
5785         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5786     } else if (!ie->host_cmd) {
5787         /* Some architectures define BSD ioctls in their headers
5788            that are not implemented in Linux.  */
5789         return -TARGET_ENOSYS;
5790     }
5791 
5792     switch(arg_type[0]) {
5793     case TYPE_NULL:
5794         /* no argument */
5795         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5796         break;
5797     case TYPE_PTRVOID:
5798     case TYPE_INT:
5799         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5800         break;
5801     case TYPE_PTR:
5802         arg_type++;
5803         target_size = thunk_type_size(arg_type, 0);
5804         switch(ie->access) {
5805         case IOC_R:
5806             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5807             if (!is_error(ret)) {
5808                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5809                 if (!argptr)
5810                     return -TARGET_EFAULT;
5811                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5812                 unlock_user(argptr, arg, target_size);
5813             }
5814             break;
5815         case IOC_W:
5816             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5817             if (!argptr)
5818                 return -TARGET_EFAULT;
5819             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5820             unlock_user(argptr, arg, 0);
5821             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5822             break;
5823         default:
5824         case IOC_RW:
5825             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5826             if (!argptr)
5827                 return -TARGET_EFAULT;
5828             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5829             unlock_user(argptr, arg, 0);
5830             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5831             if (!is_error(ret)) {
5832                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5833                 if (!argptr)
5834                     return -TARGET_EFAULT;
5835                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5836                 unlock_user(argptr, arg, target_size);
5837             }
5838             break;
5839         }
5840         break;
5841     default:
5842         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5843                  (long)cmd, arg_type[0]);
5844         ret = -TARGET_ENOSYS;
5845         break;
5846     }
5847     return ret;
5848 }
5849 
5850 static const bitmask_transtbl iflag_tbl[] = {
5851         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5852         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5853         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5854         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5855         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5856         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5857         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5858         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5859         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5860         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5861         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5862         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5863         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5864         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5865         { 0, 0, 0, 0 }
5866 };
5867 
5868 static const bitmask_transtbl oflag_tbl[] = {
5869 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5870 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5871 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5872 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5873 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5874 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5875 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5876 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5877 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5878 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5879 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5880 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5881 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5882 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5883 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5884 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5885 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5886 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5887 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5888 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5889 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5890 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5891 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5892 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5893 	{ 0, 0, 0, 0 }
5894 };
5895 
5896 static const bitmask_transtbl cflag_tbl[] = {
5897 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5898 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5899 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5900 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5901 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5902 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5903 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5904 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5905 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5906 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5907 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5908 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5909 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5910 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5911 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5912 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5913 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5914 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5915 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5916 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5917 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5918 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5919 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5920 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5921 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5922 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5923 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5924 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5925 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5926 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5927 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5928 	{ 0, 0, 0, 0 }
5929 };
5930 
5931 static const bitmask_transtbl lflag_tbl[] = {
5932 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5933 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5934 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5935 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5936 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5937 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5938 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5939 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5940 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5941 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5942 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5943 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5944 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5945 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5946 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5947 	{ 0, 0, 0, 0 }
5948 };
5949 
5950 static void target_to_host_termios (void *dst, const void *src)
5951 {
5952     struct host_termios *host = dst;
5953     const struct target_termios *target = src;
5954 
5955     host->c_iflag =
5956         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5957     host->c_oflag =
5958         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5959     host->c_cflag =
5960         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5961     host->c_lflag =
5962         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5963     host->c_line = target->c_line;
5964 
5965     memset(host->c_cc, 0, sizeof(host->c_cc));
5966     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5967     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5968     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5969     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5970     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5971     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5972     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5973     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5974     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5975     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5976     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5977     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5978     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5979     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5980     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5981     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5982     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5983 }
5984 
5985 static void host_to_target_termios (void *dst, const void *src)
5986 {
5987     struct target_termios *target = dst;
5988     const struct host_termios *host = src;
5989 
5990     target->c_iflag =
5991         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5992     target->c_oflag =
5993         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5994     target->c_cflag =
5995         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5996     target->c_lflag =
5997         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5998     target->c_line = host->c_line;
5999 
6000     memset(target->c_cc, 0, sizeof(target->c_cc));
6001     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6002     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6003     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6004     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6005     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6006     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6007     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6008     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6009     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6010     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6011     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6012     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6013     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6014     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6015     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6016     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6017     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6018 }
6019 
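/* Registering custom converters here makes the generic ioctl path use
 * host_to_target_termios()/target_to_host_termios() whenever a
 * STRUCT_termios argument is thunked, e.g. for the TCGETS/TCSETS
 * family of ioctls.
 */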
6020 static const StructEntry struct_termios_def = {
6021     .convert = { host_to_target_termios, target_to_host_termios },
6022     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6023     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6024 };
6025 
6026 static bitmask_transtbl mmap_flags_tbl[] = {
6027     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6028     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6029     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6030     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6031       MAP_ANONYMOUS, MAP_ANONYMOUS },
6032     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6033       MAP_GROWSDOWN, MAP_GROWSDOWN },
6034     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6035       MAP_DENYWRITE, MAP_DENYWRITE },
6036     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6037       MAP_EXECUTABLE, MAP_EXECUTABLE },
6038     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6039     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6040       MAP_NORESERVE, MAP_NORESERVE },
6041     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6042     /* MAP_STACK had been ignored by the kernel for quite some time.
6043        Recognize it for the target insofar as we do not want to pass
6044        it through to the host.  */
6045     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6046     { 0, 0, 0, 0 }
6047 };
6048 
6049 #if defined(TARGET_I386)
6050 
6051 /* NOTE: there is really one LDT for all the threads */
6052 static uint8_t *ldt_table;
6053 
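/* read_ldt(): copy the emulated LDT contents (kept in ldt_table) back
 * to the guest, truncated to the requested byte count.
 */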
6054 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6055 {
6056     int size;
6057     void *p;
6058 
6059     if (!ldt_table)
6060         return 0;
6061     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6062     if (size > bytecount)
6063         size = bytecount;
6064     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6065     if (!p)
6066         return -TARGET_EFAULT;
6067     /* ??? Should this be byteswapped?  */
6068     memcpy(p, ldt_table, size);
6069     unlock_user(p, ptr, size);
6070     return size;
6071 }
6072 
6073 /* XXX: add locking support */
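/* write_ldt(): validate the guest's descriptor, build the two 32-bit
 * descriptor words the same way the Linux kernel does, and store them
 * into the mmap()ed ldt_table that backs the emulated LDT.
 */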
6074 static abi_long write_ldt(CPUX86State *env,
6075                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6076 {
6077     struct target_modify_ldt_ldt_s ldt_info;
6078     struct target_modify_ldt_ldt_s *target_ldt_info;
6079     int seg_32bit, contents, read_exec_only, limit_in_pages;
6080     int seg_not_present, useable, lm;
6081     uint32_t *lp, entry_1, entry_2;
6082 
6083     if (bytecount != sizeof(ldt_info))
6084         return -TARGET_EINVAL;
6085     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6086         return -TARGET_EFAULT;
6087     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6088     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6089     ldt_info.limit = tswap32(target_ldt_info->limit);
6090     ldt_info.flags = tswap32(target_ldt_info->flags);
6091     unlock_user_struct(target_ldt_info, ptr, 0);
6092 
6093     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6094         return -TARGET_EINVAL;
6095     seg_32bit = ldt_info.flags & 1;
6096     contents = (ldt_info.flags >> 1) & 3;
6097     read_exec_only = (ldt_info.flags >> 3) & 1;
6098     limit_in_pages = (ldt_info.flags >> 4) & 1;
6099     seg_not_present = (ldt_info.flags >> 5) & 1;
6100     useable = (ldt_info.flags >> 6) & 1;
6101 #ifdef TARGET_ABI32
6102     lm = 0;
6103 #else
6104     lm = (ldt_info.flags >> 7) & 1;
6105 #endif
6106     if (contents == 3) {
6107         if (oldmode)
6108             return -TARGET_EINVAL;
6109         if (seg_not_present == 0)
6110             return -TARGET_EINVAL;
6111     }
6112     /* allocate the LDT */
6113     if (!ldt_table) {
6114         env->ldt.base = target_mmap(0,
6115                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6116                                     PROT_READ|PROT_WRITE,
6117                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6118         if (env->ldt.base == -1)
6119             return -TARGET_ENOMEM;
6120         memset(g2h(env->ldt.base), 0,
6121                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6122         env->ldt.limit = 0xffff;
6123         ldt_table = g2h(env->ldt.base);
6124     }
6125 
6126     /* NOTE: same code as Linux kernel */
6127     /* Allow LDTs to be cleared by the user. */
6128     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6129         if (oldmode ||
6130             (contents == 0		&&
6131              read_exec_only == 1	&&
6132              seg_32bit == 0		&&
6133              limit_in_pages == 0	&&
6134              seg_not_present == 1	&&
6135              useable == 0 )) {
6136             entry_1 = 0;
6137             entry_2 = 0;
6138             goto install;
6139         }
6140     }
6141 
6142     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6143         (ldt_info.limit & 0x0ffff);
6144     entry_2 = (ldt_info.base_addr & 0xff000000) |
6145         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6146         (ldt_info.limit & 0xf0000) |
6147         ((read_exec_only ^ 1) << 9) |
6148         (contents << 10) |
6149         ((seg_not_present ^ 1) << 15) |
6150         (seg_32bit << 22) |
6151         (limit_in_pages << 23) |
6152         (lm << 21) |
6153         0x7000;
6154     if (!oldmode)
6155         entry_2 |= (useable << 20);
6156 
6157     /* Install the new entry ...  */
6158 install:
6159     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6160     lp[0] = tswap32(entry_1);
6161     lp[1] = tswap32(entry_2);
6162     return 0;
6163 }
6164 
6165 /* specific and weird i386 syscalls */
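/* The func values mirror the kernel's modify_ldt(): 0 reads the LDT,
 * 1 writes an entry using the legacy format (oldmode), and 0x11 writes
 * using the current format that honours the extra flag bits.
 */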
6166 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6167                               unsigned long bytecount)
6168 {
6169     abi_long ret;
6170 
6171     switch (func) {
6172     case 0:
6173         ret = read_ldt(ptr, bytecount);
6174         break;
6175     case 1:
6176         ret = write_ldt(env, ptr, bytecount, 1);
6177         break;
6178     case 0x11:
6179         ret = write_ldt(env, ptr, bytecount, 0);
6180         break;
6181     default:
6182         ret = -TARGET_ENOSYS;
6183         break;
6184     }
6185     return ret;
6186 }
6187 
6188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6189 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6190 {
6191     uint64_t *gdt_table = g2h(env->gdt.base);
6192     struct target_modify_ldt_ldt_s ldt_info;
6193     struct target_modify_ldt_ldt_s *target_ldt_info;
6194     int seg_32bit, contents, read_exec_only, limit_in_pages;
6195     int seg_not_present, useable, lm;
6196     uint32_t *lp, entry_1, entry_2;
6197     int i;
6198 
6199     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6200     if (!target_ldt_info)
6201         return -TARGET_EFAULT;
6202     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6203     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6204     ldt_info.limit = tswap32(target_ldt_info->limit);
6205     ldt_info.flags = tswap32(target_ldt_info->flags);
6206     if (ldt_info.entry_number == -1) {
6207         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6208             if (gdt_table[i] == 0) {
6209                 ldt_info.entry_number = i;
6210                 target_ldt_info->entry_number = tswap32(i);
6211                 break;
6212             }
6213         }
6214     }
6215     unlock_user_struct(target_ldt_info, ptr, 1);
6216 
6217     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6218         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6219            return -TARGET_EINVAL;
6220     seg_32bit = ldt_info.flags & 1;
6221     contents = (ldt_info.flags >> 1) & 3;
6222     read_exec_only = (ldt_info.flags >> 3) & 1;
6223     limit_in_pages = (ldt_info.flags >> 4) & 1;
6224     seg_not_present = (ldt_info.flags >> 5) & 1;
6225     useable = (ldt_info.flags >> 6) & 1;
6226 #ifdef TARGET_ABI32
6227     lm = 0;
6228 #else
6229     lm = (ldt_info.flags >> 7) & 1;
6230 #endif
6231 
6232     if (contents == 3) {
6233         if (seg_not_present == 0)
6234             return -TARGET_EINVAL;
6235     }
6236 
6237     /* NOTE: same code as Linux kernel */
6238     /* Allow LDTs to be cleared by the user. */
6239     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6240         if ((contents == 0             &&
6241              read_exec_only == 1       &&
6242              seg_32bit == 0            &&
6243              limit_in_pages == 0       &&
6244              seg_not_present == 1      &&
6245              useable == 0 )) {
6246             entry_1 = 0;
6247             entry_2 = 0;
6248             goto install;
6249         }
6250     }
6251 
6252     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6253         (ldt_info.limit & 0x0ffff);
6254     entry_2 = (ldt_info.base_addr & 0xff000000) |
6255         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6256         (ldt_info.limit & 0xf0000) |
6257         ((read_exec_only ^ 1) << 9) |
6258         (contents << 10) |
6259         ((seg_not_present ^ 1) << 15) |
6260         (seg_32bit << 22) |
6261         (limit_in_pages << 23) |
6262         (useable << 20) |
6263         (lm << 21) |
6264         0x7000;
6265 
6266     /* Install the new entry ...  */
6267 install:
6268     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6269     lp[0] = tswap32(entry_1);
6270     lp[1] = tswap32(entry_2);
6271     return 0;
6272 }
6273 
6274 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6275 {
6276     struct target_modify_ldt_ldt_s *target_ldt_info;
6277     uint64_t *gdt_table = g2h(env->gdt.base);
6278     uint32_t base_addr, limit, flags;
6279     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6280     int seg_not_present, useable, lm;
6281     uint32_t *lp, entry_1, entry_2;
6282 
6283     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6284     if (!target_ldt_info)
6285         return -TARGET_EFAULT;
6286     idx = tswap32(target_ldt_info->entry_number);
6287     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6288         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6289         unlock_user_struct(target_ldt_info, ptr, 1);
6290         return -TARGET_EINVAL;
6291     }
6292     lp = (uint32_t *)(gdt_table + idx);
6293     entry_1 = tswap32(lp[0]);
6294     entry_2 = tswap32(lp[1]);
6295 
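    /* Unpack the descriptor words back into modify_ldt-style fields; this is
     * the inverse of the packing done in do_set_thread_area() above.
     */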
6296     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6297     contents = (entry_2 >> 10) & 3;
6298     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6299     seg_32bit = (entry_2 >> 22) & 1;
6300     limit_in_pages = (entry_2 >> 23) & 1;
6301     useable = (entry_2 >> 20) & 1;
6302 #ifdef TARGET_ABI32
6303     lm = 0;
6304 #else
6305     lm = (entry_2 >> 21) & 1;
6306 #endif
6307     flags = (seg_32bit << 0) | (contents << 1) |
6308         (read_exec_only << 3) | (limit_in_pages << 4) |
6309         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6310     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6311     base_addr = (entry_1 >> 16) |
6312         (entry_2 & 0xff000000) |
6313         ((entry_2 & 0xff) << 16);
6314     target_ldt_info->base_addr = tswapal(base_addr);
6315     target_ldt_info->limit = tswap32(limit);
6316     target_ldt_info->flags = tswap32(flags);
6317     unlock_user_struct(target_ldt_info, ptr, 1);
6318     return 0;
6319 }
6320 #endif /* TARGET_I386 && TARGET_ABI32 */
6321 
6322 #ifndef TARGET_ABI32
6323 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6324 {
6325     abi_long ret = 0;
6326     abi_ulong val;
6327     int idx;
6328 
6329     switch(code) {
6330     case TARGET_ARCH_SET_GS:
6331     case TARGET_ARCH_SET_FS:
6332         if (code == TARGET_ARCH_SET_GS)
6333             idx = R_GS;
6334         else
6335             idx = R_FS;
6336         cpu_x86_load_seg(env, idx, 0);
6337         env->segs[idx].base = addr;
6338         break;
6339     case TARGET_ARCH_GET_GS:
6340     case TARGET_ARCH_GET_FS:
6341         if (code == TARGET_ARCH_GET_GS)
6342             idx = R_GS;
6343         else
6344             idx = R_FS;
6345         val = env->segs[idx].base;
6346         if (put_user(val, addr, abi_ulong))
6347             ret = -TARGET_EFAULT;
6348         break;
6349     default:
6350         ret = -TARGET_EINVAL;
6351         break;
6352     }
6353     return ret;
6354 }
6355 #endif
6356 
6357 #endif /* defined(TARGET_I386) */
6358 
6359 #define NEW_STACK_SIZE 0x40000
6360 
6361 
6362 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6363 typedef struct {
6364     CPUArchState *env;
6365     pthread_mutex_t mutex;
6366     pthread_cond_t cond;
6367     pthread_t thread;
6368     uint32_t tid;
6369     abi_ulong child_tidptr;
6370     abi_ulong parent_tidptr;
6371     sigset_t sigmask;
6372 } new_thread_info;
6373 
6374 static void *clone_func(void *arg)
6375 {
6376     new_thread_info *info = arg;
6377     CPUArchState *env;
6378     CPUState *cpu;
6379     TaskState *ts;
6380 
6381     rcu_register_thread();
6382     tcg_register_thread();
6383     env = info->env;
6384     cpu = ENV_GET_CPU(env);
6385     thread_cpu = cpu;
6386     ts = (TaskState *)cpu->opaque;
6387     info->tid = gettid();
6388     task_settid(ts);
6389     if (info->child_tidptr)
6390         put_user_u32(info->tid, info->child_tidptr);
6391     if (info->parent_tidptr)
6392         put_user_u32(info->tid, info->parent_tidptr);
6393     /* Enable signals.  */
6394     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6395     /* Signal to the parent that we're ready.  */
6396     pthread_mutex_lock(&info->mutex);
6397     pthread_cond_broadcast(&info->cond);
6398     pthread_mutex_unlock(&info->mutex);
6399     /* Wait until the parent has finished initializing the tls state.  */
6400     pthread_mutex_lock(&clone_lock);
6401     pthread_mutex_unlock(&clone_lock);
6402     cpu_loop(env);
6403     /* never exits */
6404     return NULL;
6405 }
6406 
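/* Parent/child handshake used by do_fork() for CLONE_VM: the parent holds
 * clone_lock while it sets up the new CPUState and TLS; clone_func() signals
 * info->cond once the child has published its TID and unblocked signals,
 * then briefly takes clone_lock so it cannot enter cpu_loop() before the
 * parent has finished initialization.
 */
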
6407 /* do_fork() must return host values and target errnos (unlike most
6408    do_*() functions). */
6409 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6410                    abi_ulong parent_tidptr, target_ulong newtls,
6411                    abi_ulong child_tidptr)
6412 {
6413     CPUState *cpu = ENV_GET_CPU(env);
6414     int ret;
6415     TaskState *ts;
6416     CPUState *new_cpu;
6417     CPUArchState *new_env;
6418     sigset_t sigmask;
6419 
6420     flags &= ~CLONE_IGNORED_FLAGS;
6421 
6422     /* Emulate vfork() with fork() */
6423     if (flags & CLONE_VFORK)
6424         flags &= ~(CLONE_VFORK | CLONE_VM);
6425 
6426     if (flags & CLONE_VM) {
6427         TaskState *parent_ts = (TaskState *)cpu->opaque;
6428         new_thread_info info;
6429         pthread_attr_t attr;
6430 
6431         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6432             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6433             return -TARGET_EINVAL;
6434         }
6435 
6436         ts = g_new0(TaskState, 1);
6437         init_task_state(ts);
6438 
6439         /* Grab a mutex so that thread setup appears atomic.  */
6440         pthread_mutex_lock(&clone_lock);
6441 
6442         /* we create a new CPU instance. */
6443         new_env = cpu_copy(env);
6444         /* Init regs that differ from the parent.  */
6445         cpu_clone_regs(new_env, newsp);
6446         new_cpu = ENV_GET_CPU(new_env);
6447         new_cpu->opaque = ts;
6448         ts->bprm = parent_ts->bprm;
6449         ts->info = parent_ts->info;
6450         ts->signal_mask = parent_ts->signal_mask;
6451 
6452         if (flags & CLONE_CHILD_CLEARTID) {
6453             ts->child_tidptr = child_tidptr;
6454         }
6455 
6456         if (flags & CLONE_SETTLS) {
6457             cpu_set_tls (new_env, newtls);
6458         }
6459 
6460         memset(&info, 0, sizeof(info));
6461         pthread_mutex_init(&info.mutex, NULL);
6462         pthread_mutex_lock(&info.mutex);
6463         pthread_cond_init(&info.cond, NULL);
6464         info.env = new_env;
6465         if (flags & CLONE_CHILD_SETTID) {
6466             info.child_tidptr = child_tidptr;
6467         }
6468         if (flags & CLONE_PARENT_SETTID) {
6469             info.parent_tidptr = parent_tidptr;
6470         }
6471 
6472         ret = pthread_attr_init(&attr);
6473         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6474         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6475         /* It is not safe to deliver signals until the child has finished
6476            initializing, so temporarily block all signals.  */
6477         sigfillset(&sigmask);
6478         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6479 
6480         /* If this is our first additional thread, we need to ensure we
6481          * generate code for parallel execution and flush old translations.
6482          */
6483         if (!parallel_cpus) {
6484             parallel_cpus = true;
6485             tb_flush(cpu);
6486         }
6487 
6488         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6489         /* TODO: Free new CPU state if thread creation failed.  */
6490 
6491         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6492         pthread_attr_destroy(&attr);
6493         if (ret == 0) {
6494             /* Wait for the child to initialize.  */
6495             pthread_cond_wait(&info.cond, &info.mutex);
6496             ret = info.tid;
6497         } else {
6498             ret = -1;
6499         }
6500         pthread_mutex_unlock(&info.mutex);
6501         pthread_cond_destroy(&info.cond);
6502         pthread_mutex_destroy(&info.mutex);
6503         pthread_mutex_unlock(&clone_lock);
6504     } else {
6505         /* if CLONE_VM is not set, we treat this as a fork */
6506         if (flags & CLONE_INVALID_FORK_FLAGS) {
6507             return -TARGET_EINVAL;
6508         }
6509 
6510         /* We can't support custom termination signals */
6511         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6512             return -TARGET_EINVAL;
6513         }
6514 
6515         if (block_signals()) {
6516             return -TARGET_ERESTARTSYS;
6517         }
6518 
6519         fork_start();
6520         ret = fork();
6521         if (ret == 0) {
6522             /* Child Process.  */
6523             cpu_clone_regs(env, newsp);
6524             fork_end(1);
6525             /* There is a race condition here.  The parent process could
6526                theoretically read the TID in the child process before the child
6527                tid is set.  Fixing this would require either ptrace
6528                (not implemented) or having *_tidptr point at a shared memory
6529                mapping.  We can't repeat the spinlock hack used above because
6530                the child process gets its own copy of the lock.  */
6531             if (flags & CLONE_CHILD_SETTID)
6532                 put_user_u32(gettid(), child_tidptr);
6533             if (flags & CLONE_PARENT_SETTID)
6534                 put_user_u32(gettid(), parent_tidptr);
6535             ts = (TaskState *)cpu->opaque;
6536             if (flags & CLONE_SETTLS)
6537                 cpu_set_tls (env, newtls);
6538             if (flags & CLONE_CHILD_CLEARTID)
6539                 ts->child_tidptr = child_tidptr;
6540         } else {
6541             fork_end(0);
6542         }
6543     }
6544     return ret;
6545 }
6546 
6547 /* warning: doesn't handle Linux-specific flags... */
6548 static int target_to_host_fcntl_cmd(int cmd)
6549 {
6550     int ret;
6551 
6552     switch(cmd) {
6553     case TARGET_F_DUPFD:
6554     case TARGET_F_GETFD:
6555     case TARGET_F_SETFD:
6556     case TARGET_F_GETFL:
6557     case TARGET_F_SETFL:
6558         ret = cmd;
6559         break;
6560     case TARGET_F_GETLK:
6561         ret = F_GETLK64;
6562         break;
6563     case TARGET_F_SETLK:
6564         ret = F_SETLK64;
6565         break;
6566     case TARGET_F_SETLKW:
6567         ret = F_SETLKW64;
6568         break;
6569     case TARGET_F_GETOWN:
6570         ret = F_GETOWN;
6571         break;
6572     case TARGET_F_SETOWN:
6573         ret = F_SETOWN;
6574         break;
6575     case TARGET_F_GETSIG:
6576         ret = F_GETSIG;
6577         break;
6578     case TARGET_F_SETSIG:
6579         ret = F_SETSIG;
6580         break;
6581 #if TARGET_ABI_BITS == 32
6582     case TARGET_F_GETLK64:
6583         ret = F_GETLK64;
6584         break;
6585     case TARGET_F_SETLK64:
6586         ret = F_SETLK64;
6587         break;
6588     case TARGET_F_SETLKW64:
6589         ret = F_SETLKW64;
6590         break;
6591 #endif
6592     case TARGET_F_SETLEASE:
6593         ret = F_SETLEASE;
6594         break;
6595     case TARGET_F_GETLEASE:
6596         ret = F_GETLEASE;
6597         break;
6598 #ifdef F_DUPFD_CLOEXEC
6599     case TARGET_F_DUPFD_CLOEXEC:
6600         ret = F_DUPFD_CLOEXEC;
6601         break;
6602 #endif
6603     case TARGET_F_NOTIFY:
6604         ret = F_NOTIFY;
6605         break;
6606 #ifdef F_GETOWN_EX
6607     case TARGET_F_GETOWN_EX:
6608         ret = F_GETOWN_EX;
6609         break;
6610 #endif
6611 #ifdef F_SETOWN_EX
6612     case TARGET_F_SETOWN_EX:
6613         ret = F_SETOWN_EX;
6614         break;
6615 #endif
6616 #ifdef F_SETPIPE_SZ
6617     case TARGET_F_SETPIPE_SZ:
6618         ret = F_SETPIPE_SZ;
6619         break;
6620     case TARGET_F_GETPIPE_SZ:
6621         ret = F_GETPIPE_SZ;
6622         break;
6623 #endif
6624     default:
6625         ret = -TARGET_EINVAL;
6626         break;
6627     }
6628 
6629 #if defined(__powerpc64__)
6630     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6631      * 14, which are not supported by the kernel. The glibc fcntl wrapper
6632      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6633      * the syscall directly, adjust to what the kernel supports.
6634      */
6635     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6636         ret -= F_GETLK64 - 5;
6637     }
6638 #endif
6639 
6640     return ret;
6641 }
6642 
6643 #define FLOCK_TRANSTBL \
6644     switch (type) { \
6645     TRANSTBL_CONVERT(F_RDLCK); \
6646     TRANSTBL_CONVERT(F_WRLCK); \
6647     TRANSTBL_CONVERT(F_UNLCK); \
6648     TRANSTBL_CONVERT(F_EXLCK); \
6649     TRANSTBL_CONVERT(F_SHLCK); \
6650     }
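/* FLOCK_TRANSTBL is expanded twice below with different definitions of
 * TRANSTBL_CONVERT, generating both directions of the lock-type mapping
 * from a single list of values.
 */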
6651 
6652 static int target_to_host_flock(int type)
6653 {
6654 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6655     FLOCK_TRANSTBL
6656 #undef  TRANSTBL_CONVERT
6657     return -TARGET_EINVAL;
6658 }
6659 
6660 static int host_to_target_flock(int type)
6661 {
6662 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6663     FLOCK_TRANSTBL
6664 #undef  TRANSTBL_CONVERT
6665     /* if we don't know how to convert the value coming from the host,
6666      * copy it to the target field as-is
6667      */
6668     return type;
6669 }
6670 
6671 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6672                                             abi_ulong target_flock_addr)
6673 {
6674     struct target_flock *target_fl;
6675     int l_type;
6676 
6677     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6678         return -TARGET_EFAULT;
6679     }
6680 
6681     __get_user(l_type, &target_fl->l_type);
6682     l_type = target_to_host_flock(l_type);
6683     if (l_type < 0) {
6684         return l_type;
6685     }
6686     fl->l_type = l_type;
6687     __get_user(fl->l_whence, &target_fl->l_whence);
6688     __get_user(fl->l_start, &target_fl->l_start);
6689     __get_user(fl->l_len, &target_fl->l_len);
6690     __get_user(fl->l_pid, &target_fl->l_pid);
6691     unlock_user_struct(target_fl, target_flock_addr, 0);
6692     return 0;
6693 }
6694 
6695 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6696                                           const struct flock64 *fl)
6697 {
6698     struct target_flock *target_fl;
6699     short l_type;
6700 
6701     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6702         return -TARGET_EFAULT;
6703     }
6704 
6705     l_type = host_to_target_flock(fl->l_type);
6706     __put_user(l_type, &target_fl->l_type);
6707     __put_user(fl->l_whence, &target_fl->l_whence);
6708     __put_user(fl->l_start, &target_fl->l_start);
6709     __put_user(fl->l_len, &target_fl->l_len);
6710     __put_user(fl->l_pid, &target_fl->l_pid);
6711     unlock_user_struct(target_fl, target_flock_addr, 1);
6712     return 0;
6713 }
6714 
6715 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6716 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6717 
6718 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6719 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6720                                                    abi_ulong target_flock_addr)
6721 {
6722     struct target_oabi_flock64 *target_fl;
6723     int l_type;
6724 
6725     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6726         return -TARGET_EFAULT;
6727     }
6728 
6729     __get_user(l_type, &target_fl->l_type);
6730     l_type = target_to_host_flock(l_type);
6731     if (l_type < 0) {
6732         return l_type;
6733     }
6734     fl->l_type = l_type;
6735     __get_user(fl->l_whence, &target_fl->l_whence);
6736     __get_user(fl->l_start, &target_fl->l_start);
6737     __get_user(fl->l_len, &target_fl->l_len);
6738     __get_user(fl->l_pid, &target_fl->l_pid);
6739     unlock_user_struct(target_fl, target_flock_addr, 0);
6740     return 0;
6741 }
6742 
6743 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6744                                                  const struct flock64 *fl)
6745 {
6746     struct target_oabi_flock64 *target_fl;
6747     short l_type;
6748 
6749     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6750         return -TARGET_EFAULT;
6751     }
6752 
6753     l_type = host_to_target_flock(fl->l_type);
6754     __put_user(l_type, &target_fl->l_type);
6755     __put_user(fl->l_whence, &target_fl->l_whence);
6756     __put_user(fl->l_start, &target_fl->l_start);
6757     __put_user(fl->l_len, &target_fl->l_len);
6758     __put_user(fl->l_pid, &target_fl->l_pid);
6759     unlock_user_struct(target_fl, target_flock_addr, 1);
6760     return 0;
6761 }
6762 #endif
6763 
6764 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6765                                               abi_ulong target_flock_addr)
6766 {
6767     struct target_flock64 *target_fl;
6768     int l_type;
6769 
6770     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6771         return -TARGET_EFAULT;
6772     }
6773 
6774     __get_user(l_type, &target_fl->l_type);
6775     l_type = target_to_host_flock(l_type);
6776     if (l_type < 0) {
6777         return l_type;
6778     }
6779     fl->l_type = l_type;
6780     __get_user(fl->l_whence, &target_fl->l_whence);
6781     __get_user(fl->l_start, &target_fl->l_start);
6782     __get_user(fl->l_len, &target_fl->l_len);
6783     __get_user(fl->l_pid, &target_fl->l_pid);
6784     unlock_user_struct(target_fl, target_flock_addr, 0);
6785     return 0;
6786 }
6787 
6788 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6789                                             const struct flock64 *fl)
6790 {
6791     struct target_flock64 *target_fl;
6792     short l_type;
6793 
6794     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6795         return -TARGET_EFAULT;
6796     }
6797 
6798     l_type = host_to_target_flock(fl->l_type);
6799     __put_user(l_type, &target_fl->l_type);
6800     __put_user(fl->l_whence, &target_fl->l_whence);
6801     __put_user(fl->l_start, &target_fl->l_start);
6802     __put_user(fl->l_len, &target_fl->l_len);
6803     __put_user(fl->l_pid, &target_fl->l_pid);
6804     unlock_user_struct(target_fl, target_flock_addr, 1);
6805     return 0;
6806 }
6807 
6808 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6809 {
6810     struct flock64 fl64;
6811 #ifdef F_GETOWN_EX
6812     struct f_owner_ex fox;
6813     struct target_f_owner_ex *target_fox;
6814 #endif
6815     abi_long ret;
6816     int host_cmd = target_to_host_fcntl_cmd(cmd);
6817 
6818     if (host_cmd == -TARGET_EINVAL)
6819         return host_cmd;
6820 
6821     switch(cmd) {
6822     case TARGET_F_GETLK:
6823         ret = copy_from_user_flock(&fl64, arg);
6824         if (ret) {
6825             return ret;
6826         }
6827         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6828         if (ret == 0) {
6829             ret = copy_to_user_flock(arg, &fl64);
6830         }
6831         break;
6832 
6833     case TARGET_F_SETLK:
6834     case TARGET_F_SETLKW:
6835         ret = copy_from_user_flock(&fl64, arg);
6836         if (ret) {
6837             return ret;
6838         }
6839         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6840         break;
6841 
6842     case TARGET_F_GETLK64:
6843         ret = copy_from_user_flock64(&fl64, arg);
6844         if (ret) {
6845             return ret;
6846         }
6847         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6848         if (ret == 0) {
6849             ret = copy_to_user_flock64(arg, &fl64);
6850         }
6851         break;
6852     case TARGET_F_SETLK64:
6853     case TARGET_F_SETLKW64:
6854         ret = copy_from_user_flock64(&fl64, arg);
6855         if (ret) {
6856             return ret;
6857         }
6858         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6859         break;
6860 
6861     case TARGET_F_GETFL:
6862         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6863         if (ret >= 0) {
6864             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6865         }
6866         break;
6867 
6868     case TARGET_F_SETFL:
6869         ret = get_errno(safe_fcntl(fd, host_cmd,
6870                                    target_to_host_bitmask(arg,
6871                                                           fcntl_flags_tbl)));
6872         break;
6873 
6874 #ifdef F_GETOWN_EX
6875     case TARGET_F_GETOWN_EX:
6876         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6877         if (ret >= 0) {
6878             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6879                 return -TARGET_EFAULT;
6880             target_fox->type = tswap32(fox.type);
6881             target_fox->pid = tswap32(fox.pid);
6882             unlock_user_struct(target_fox, arg, 1);
6883         }
6884         break;
6885 #endif
6886 
6887 #ifdef F_SETOWN_EX
6888     case TARGET_F_SETOWN_EX:
6889         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6890             return -TARGET_EFAULT;
6891         fox.type = tswap32(target_fox->type);
6892         fox.pid = tswap32(target_fox->pid);
6893         unlock_user_struct(target_fox, arg, 0);
6894         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6895         break;
6896 #endif
6897 
6898     case TARGET_F_SETOWN:
6899     case TARGET_F_GETOWN:
6900     case TARGET_F_SETSIG:
6901     case TARGET_F_GETSIG:
6902     case TARGET_F_SETLEASE:
6903     case TARGET_F_GETLEASE:
6904     case TARGET_F_SETPIPE_SZ:
6905     case TARGET_F_GETPIPE_SZ:
6906         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6907         break;
6908 
6909     default:
6910         ret = get_errno(safe_fcntl(fd, cmd, arg));
6911         break;
6912     }
6913     return ret;
6914 }
6915 
6916 #ifdef USE_UID16
6917 
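/* For targets whose native uid_t/gid_t is 16 bits wide: values above 65535
 * are clamped to 65534 (the kernel's "overflow" id) when narrowing, and the
 * 16-bit -1 sentinel ("don't change") is preserved when widening.
 */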
6918 static inline int high2lowuid(int uid)
6919 {
6920     if (uid > 65535)
6921         return 65534;
6922     else
6923         return uid;
6924 }
6925 
6926 static inline int high2lowgid(int gid)
6927 {
6928     if (gid > 65535)
6929         return 65534;
6930     else
6931         return gid;
6932 }
6933 
6934 static inline int low2highuid(int uid)
6935 {
6936     if ((int16_t)uid == -1)
6937         return -1;
6938     else
6939         return uid;
6940 }
6941 
6942 static inline int low2highgid(int gid)
6943 {
6944     if ((int16_t)gid == -1)
6945         return -1;
6946     else
6947         return gid;
6948 }
6949 static inline int tswapid(int id)
6950 {
6951     return tswap16(id);
6952 }
6953 
6954 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6955 
6956 #else /* !USE_UID16 */
6957 static inline int high2lowuid(int uid)
6958 {
6959     return uid;
6960 }
6961 static inline int high2lowgid(int gid)
6962 {
6963     return gid;
6964 }
6965 static inline int low2highuid(int uid)
6966 {
6967     return uid;
6968 }
6969 static inline int low2highgid(int gid)
6970 {
6971     return gid;
6972 }
6973 static inline int tswapid(int id)
6974 {
6975     return tswap32(id);
6976 }
6977 
6978 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6979 
6980 #endif /* USE_UID16 */
6981 
6982 /* We must do direct syscalls for setting UID/GID, because we want to
6983  * implement the Linux system call semantics of "change only for this thread",
6984  * not the libc/POSIX semantics of "change for all threads in process".
6985  * (See http://ewontfix.com/17/ for more details.)
6986  * We use the 32-bit version of the syscalls if present; if it is not
6987  * then either the host architecture supports 32-bit UIDs natively with
6988  * the standard syscall, or the 16-bit UID is the best we can do.
6989  */
6990 #ifdef __NR_setuid32
6991 #define __NR_sys_setuid __NR_setuid32
6992 #else
6993 #define __NR_sys_setuid __NR_setuid
6994 #endif
6995 #ifdef __NR_setgid32
6996 #define __NR_sys_setgid __NR_setgid32
6997 #else
6998 #define __NR_sys_setgid __NR_setgid
6999 #endif
7000 #ifdef __NR_setresuid32
7001 #define __NR_sys_setresuid __NR_setresuid32
7002 #else
7003 #define __NR_sys_setresuid __NR_setresuid
7004 #endif
7005 #ifdef __NR_setresgid32
7006 #define __NR_sys_setresgid __NR_setresgid32
7007 #else
7008 #define __NR_sys_setresgid __NR_setresgid
7009 #endif
7010 
7011 _syscall1(int, sys_setuid, uid_t, uid)
7012 _syscall1(int, sys_setgid, gid_t, gid)
7013 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7014 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7015 
7016 void syscall_init(void)
7017 {
7018     IOCTLEntry *ie;
7019     const argtype *arg_type;
7020     int size;
7021     int i;
7022 
7023     thunk_init(STRUCT_MAX);
7024 
7025 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7026 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7027 #include "syscall_types.h"
7028 #undef STRUCT
7029 #undef STRUCT_SPECIAL
7030 
7031     /* Build target_to_host_errno_table[] table from
7032      * host_to_target_errno_table[]. */
7033     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7034         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7035     }
7036 
7037     /* we patch the ioctl size if necessary. We rely on the fact that
7038        no ioctl has all the bits at '1' in the size field */
7039     ie = ioctl_entries;
7040     while (ie->target_cmd != 0) {
7041         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7042             TARGET_IOC_SIZEMASK) {
7043             arg_type = ie->arg_type;
7044             if (arg_type[0] != TYPE_PTR) {
7045                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7046                         ie->target_cmd);
7047                 exit(1);
7048             }
7049             arg_type++;
7050             size = thunk_type_size(arg_type, 0);
7051             ie->target_cmd = (ie->target_cmd &
7052                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7053                 (size << TARGET_IOC_SIZESHIFT);
7054         }
7055 
7056         /* automatic consistency check if same arch */
7057 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7058     (defined(__x86_64__) && defined(TARGET_X86_64))
7059         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7060             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7061                     ie->name, ie->target_cmd, ie->host_cmd);
7062         }
7063 #endif
7064         ie++;
7065     }
7066 }
7067 
7068 #if TARGET_ABI_BITS == 32
7069 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7070 {
7071 #ifdef TARGET_WORDS_BIGENDIAN
7072     return ((uint64_t)word0 << 32) | word1;
7073 #else
7074     return ((uint64_t)word1 << 32) | word0;
7075 #endif
7076 }
7077 #else /* TARGET_ABI_BITS == 32 */
7078 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7079 {
7080     return word0;
7081 }
7082 #endif /* TARGET_ABI_BITS != 32 */
7083 
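/* On 32-bit ABIs a 64-bit file offset is passed as two consecutive syscall
 * arguments; some ABIs additionally require such pairs to start on an even
 * register, in which case regpairs_aligned() reports that the argument list
 * is shifted by one slot (hence arg2 = arg3, arg3 = arg4 below).
 */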
7084 #ifdef TARGET_NR_truncate64
7085 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7086                                          abi_long arg2,
7087                                          abi_long arg3,
7088                                          abi_long arg4)
7089 {
7090     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7091         arg2 = arg3;
7092         arg3 = arg4;
7093     }
7094     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7095 }
7096 #endif
7097 
7098 #ifdef TARGET_NR_ftruncate64
7099 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7100                                           abi_long arg2,
7101                                           abi_long arg3,
7102                                           abi_long arg4)
7103 {
7104     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7105         arg2 = arg3;
7106         arg3 = arg4;
7107     }
7108     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7109 }
7110 #endif
7111 
7112 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7113                                                abi_ulong target_addr)
7114 {
7115     struct target_timespec *target_ts;
7116 
7117     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7118         return -TARGET_EFAULT;
7119     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7120     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7121     unlock_user_struct(target_ts, target_addr, 0);
7122     return 0;
7123 }
7124 
7125 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7126                                                struct timespec *host_ts)
7127 {
7128     struct target_timespec *target_ts;
7129 
7130     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7131         return -TARGET_EFAULT;
7132     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7133     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7134     unlock_user_struct(target_ts, target_addr, 1);
7135     return 0;
7136 }
7137 
7138 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7139                                                  abi_ulong target_addr)
7140 {
7141     struct target_itimerspec *target_itspec;
7142 
7143     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7144         return -TARGET_EFAULT;
7145     }
7146 
7147     host_itspec->it_interval.tv_sec =
7148                             tswapal(target_itspec->it_interval.tv_sec);
7149     host_itspec->it_interval.tv_nsec =
7150                             tswapal(target_itspec->it_interval.tv_nsec);
7151     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7152     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7153 
7154     unlock_user_struct(target_itspec, target_addr, 1);
7155     return 0;
7156 }
7157 
7158 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7159                                                struct itimerspec *host_its)
7160 {
7161     struct target_itimerspec *target_itspec;
7162 
7163     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7164         return -TARGET_EFAULT;
7165     }
7166 
7167     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7168     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7169 
7170     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7171     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7172 
7173     unlock_user_struct(target_itspec, target_addr, 0);
7174     return 0;
7175 }
7176 
7177 static inline abi_long target_to_host_timex(struct timex *host_tx,
7178                                             abi_long target_addr)
7179 {
7180     struct target_timex *target_tx;
7181 
7182     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7183         return -TARGET_EFAULT;
7184     }
7185 
7186     __get_user(host_tx->modes, &target_tx->modes);
7187     __get_user(host_tx->offset, &target_tx->offset);
7188     __get_user(host_tx->freq, &target_tx->freq);
7189     __get_user(host_tx->maxerror, &target_tx->maxerror);
7190     __get_user(host_tx->esterror, &target_tx->esterror);
7191     __get_user(host_tx->status, &target_tx->status);
7192     __get_user(host_tx->constant, &target_tx->constant);
7193     __get_user(host_tx->precision, &target_tx->precision);
7194     __get_user(host_tx->tolerance, &target_tx->tolerance);
7195     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7196     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7197     __get_user(host_tx->tick, &target_tx->tick);
7198     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7199     __get_user(host_tx->jitter, &target_tx->jitter);
7200     __get_user(host_tx->shift, &target_tx->shift);
7201     __get_user(host_tx->stabil, &target_tx->stabil);
7202     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7203     __get_user(host_tx->calcnt, &target_tx->calcnt);
7204     __get_user(host_tx->errcnt, &target_tx->errcnt);
7205     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7206     __get_user(host_tx->tai, &target_tx->tai);
7207 
7208     unlock_user_struct(target_tx, target_addr, 0);
7209     return 0;
7210 }
7211 
7212 static inline abi_long host_to_target_timex(abi_long target_addr,
7213                                             struct timex *host_tx)
7214 {
7215     struct target_timex *target_tx;
7216 
7217     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7218         return -TARGET_EFAULT;
7219     }
7220 
7221     __put_user(host_tx->modes, &target_tx->modes);
7222     __put_user(host_tx->offset, &target_tx->offset);
7223     __put_user(host_tx->freq, &target_tx->freq);
7224     __put_user(host_tx->maxerror, &target_tx->maxerror);
7225     __put_user(host_tx->esterror, &target_tx->esterror);
7226     __put_user(host_tx->status, &target_tx->status);
7227     __put_user(host_tx->constant, &target_tx->constant);
7228     __put_user(host_tx->precision, &target_tx->precision);
7229     __put_user(host_tx->tolerance, &target_tx->tolerance);
7230     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7231     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7232     __put_user(host_tx->tick, &target_tx->tick);
7233     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7234     __put_user(host_tx->jitter, &target_tx->jitter);
7235     __put_user(host_tx->shift, &target_tx->shift);
7236     __put_user(host_tx->stabil, &target_tx->stabil);
7237     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7238     __put_user(host_tx->calcnt, &target_tx->calcnt);
7239     __put_user(host_tx->errcnt, &target_tx->errcnt);
7240     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7241     __put_user(host_tx->tai, &target_tx->tai);
7242 
7243     unlock_user_struct(target_tx, target_addr, 1);
7244     return 0;
7245 }
7246 
7247 
7248 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7249                                                abi_ulong target_addr)
7250 {
7251     struct target_sigevent *target_sevp;
7252 
7253     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7254         return -TARGET_EFAULT;
7255     }
7256 
7257     /* This union is awkward on 64 bit systems because it has a 32 bit
7258      * integer and a pointer in it; we follow the conversion approach
7259      * used for handling sigval types in signal.c so the guest should get
7260      * the correct value back even if we did a 64 bit byteswap and it's
7261      * using the 32 bit integer.
7262      */
7263     host_sevp->sigev_value.sival_ptr =
7264         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7265     host_sevp->sigev_signo =
7266         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7267     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7268     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7269 
7270     unlock_user_struct(target_sevp, target_addr, 1);
7271     return 0;
7272 }
7273 
7274 #if defined(TARGET_NR_mlockall)
7275 static inline int target_to_host_mlockall_arg(int arg)
7276 {
7277     int result = 0;
7278 
7279     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7280         result |= MCL_CURRENT;
7281     }
7282     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7283         result |= MCL_FUTURE;
7284     }
7285     return result;
7286 }
7287 #endif
7288 
7289 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7290      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7291      defined(TARGET_NR_newfstatat))
7292 static inline abi_long host_to_target_stat64(void *cpu_env,
7293                                              abi_ulong target_addr,
7294                                              struct stat *host_st)
7295 {
7296 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7297     if (((CPUARMState *)cpu_env)->eabi) {
7298         struct target_eabi_stat64 *target_st;
7299 
7300         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7301             return -TARGET_EFAULT;
7302         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7303         __put_user(host_st->st_dev, &target_st->st_dev);
7304         __put_user(host_st->st_ino, &target_st->st_ino);
7305 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7306         __put_user(host_st->st_ino, &target_st->__st_ino);
7307 #endif
7308         __put_user(host_st->st_mode, &target_st->st_mode);
7309         __put_user(host_st->st_nlink, &target_st->st_nlink);
7310         __put_user(host_st->st_uid, &target_st->st_uid);
7311         __put_user(host_st->st_gid, &target_st->st_gid);
7312         __put_user(host_st->st_rdev, &target_st->st_rdev);
7313         __put_user(host_st->st_size, &target_st->st_size);
7314         __put_user(host_st->st_blksize, &target_st->st_blksize);
7315         __put_user(host_st->st_blocks, &target_st->st_blocks);
7316         __put_user(host_st->st_atime, &target_st->target_st_atime);
7317         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7318         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7319         unlock_user_struct(target_st, target_addr, 1);
7320     } else
7321 #endif
7322     {
7323 #if defined(TARGET_HAS_STRUCT_STAT64)
7324         struct target_stat64 *target_st;
7325 #else
7326         struct target_stat *target_st;
7327 #endif
7328 
7329         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7330             return -TARGET_EFAULT;
7331         memset(target_st, 0, sizeof(*target_st));
7332         __put_user(host_st->st_dev, &target_st->st_dev);
7333         __put_user(host_st->st_ino, &target_st->st_ino);
7334 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7335         __put_user(host_st->st_ino, &target_st->__st_ino);
7336 #endif
7337         __put_user(host_st->st_mode, &target_st->st_mode);
7338         __put_user(host_st->st_nlink, &target_st->st_nlink);
7339         __put_user(host_st->st_uid, &target_st->st_uid);
7340         __put_user(host_st->st_gid, &target_st->st_gid);
7341         __put_user(host_st->st_rdev, &target_st->st_rdev);
7342         /* XXX: better use of kernel struct */
7343         __put_user(host_st->st_size, &target_st->st_size);
7344         __put_user(host_st->st_blksize, &target_st->st_blksize);
7345         __put_user(host_st->st_blocks, &target_st->st_blocks);
7346         __put_user(host_st->st_atime, &target_st->target_st_atime);
7347         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7348         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7349         unlock_user_struct(target_st, target_addr, 1);
7350     }
7351 
7352     return 0;
7353 }
7354 #endif
7355 
7356 /* ??? Using host futex calls even when target atomic operations
7357    are not really atomic probably breaks things.  However, implementing
7358    futexes locally would make futexes shared between multiple processes
7359    tricky, and they're probably useless anyway because guest atomic
7360    operations won't work either.  */
7361 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7362                     target_ulong uaddr2, int val3)
7363 {
7364     struct timespec ts, *pts;
7365     int base_op;
7366 
7367     /* ??? We assume FUTEX_* constants are the same on both host
7368        and target.  */
7369 #ifdef FUTEX_CMD_MASK
7370     base_op = op & FUTEX_CMD_MASK;
7371 #else
7372     base_op = op;
7373 #endif
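    /* Only the base command determines how the remaining arguments are
     * interpreted; the full op value (including flags such as
     * FUTEX_PRIVATE_FLAG) is still passed through to the host futex call.
     */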
7374     switch (base_op) {
7375     case FUTEX_WAIT:
7376     case FUTEX_WAIT_BITSET:
7377         if (timeout) {
7378             pts = &ts;
7379             target_to_host_timespec(pts, timeout);
7380         } else {
7381             pts = NULL;
7382         }
7383         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7384                          pts, NULL, val3));
7385     case FUTEX_WAKE:
7386         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7387     case FUTEX_FD:
7388         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7389     case FUTEX_REQUEUE:
7390     case FUTEX_CMP_REQUEUE:
7391     case FUTEX_WAKE_OP:
7392         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7393            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7394            But the prototype takes a `struct timespec *'; insert casts
7395            to satisfy the compiler.  We do not need to tswap TIMEOUT
7396            since it's not compared to guest memory.  */
7397         pts = (struct timespec *)(uintptr_t) timeout;
7398         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7399                                     g2h(uaddr2),
7400                                     (base_op == FUTEX_CMP_REQUEUE
7401                                      ? tswap32(val3)
7402                                      : val3)));
7403     default:
7404         return -TARGET_ENOSYS;
7405     }
7406 }
7407 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7408 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7409                                      abi_long handle, abi_long mount_id,
7410                                      abi_long flags)
7411 {
7412     struct file_handle *target_fh;
7413     struct file_handle *fh;
7414     int mid = 0;
7415     abi_long ret;
7416     char *name;
7417     unsigned int size, total_size;
7418 
7419     if (get_user_s32(size, handle)) {
7420         return -TARGET_EFAULT;
7421     }
7422 
7423     name = lock_user_string(pathname);
7424     if (!name) {
7425         return -TARGET_EFAULT;
7426     }
7427 
7428     total_size = sizeof(struct file_handle) + size;
7429     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7430     if (!target_fh) {
7431         unlock_user(name, pathname, 0);
7432         return -TARGET_EFAULT;
7433     }
7434 
7435     fh = g_malloc0(total_size);
7436     fh->handle_bytes = size;
7437 
7438     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7439     unlock_user(name, pathname, 0);
7440 
7441     /* man name_to_handle_at(2):
7442      * Other than the use of the handle_bytes field, the caller should treat
7443      * the file_handle structure as an opaque data type
7444      */
7445 
7446     memcpy(target_fh, fh, total_size);
7447     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7448     target_fh->handle_type = tswap32(fh->handle_type);
7449     g_free(fh);
7450     unlock_user(target_fh, handle, total_size);
7451 
7452     if (put_user_s32(mid, mount_id)) {
7453         return -TARGET_EFAULT;
7454     }
7455 
7456     return ret;
7457 
7458 }
7459 #endif
7460 
7461 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7462 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7463                                      abi_long flags)
7464 {
7465     struct file_handle *target_fh;
7466     struct file_handle *fh;
7467     unsigned int size, total_size;
7468     abi_long ret;
7469 
7470     if (get_user_s32(size, handle)) {
7471         return -TARGET_EFAULT;
7472     }
7473 
7474     total_size = sizeof(struct file_handle) + size;
7475     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7476     if (!target_fh) {
7477         return -TARGET_EFAULT;
7478     }
7479 
7480     fh = g_memdup(target_fh, total_size);
7481     fh->handle_bytes = size;
7482     fh->handle_type = tswap32(target_fh->handle_type);
7483 
7484     ret = get_errno(open_by_handle_at(mount_fd, fh,
7485                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7486 
7487     g_free(fh);
7488 
7489     unlock_user(target_fh, handle, total_size);
7490 
7491     return ret;
7492 }
7493 #endif
7494 
7495 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7496 
7497 /* signalfd siginfo conversion */
7498 
7499 static void
7500 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7501                                 const struct signalfd_siginfo *info)
7502 {
7503     int sig = host_to_target_signal(info->ssi_signo);
7504 
7505     /* linux/signalfd.h defines an ssi_addr_lsb field that is not defined
7506      * in sys/signalfd.h but is used by some kernels
7507      */
7508 
7509 #ifdef BUS_MCEERR_AO
7510     if (info->ssi_signo == SIGBUS &&
7511         (info->ssi_code == BUS_MCEERR_AR ||
7512          info->ssi_code == BUS_MCEERR_AO)) {
7513         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7514         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7515         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7516     }
7517 #endif
7518 
7519     tinfo->ssi_signo = tswap32(sig);
7520     tinfo->ssi_errno = tswap32(info->ssi_errno);
7521     tinfo->ssi_code = tswap32(info->ssi_code);
7522     tinfo->ssi_pid = tswap32(info->ssi_pid);
7523     tinfo->ssi_uid = tswap32(info->ssi_uid);
7524     tinfo->ssi_fd = tswap32(info->ssi_fd);
7525     tinfo->ssi_tid = tswap32(info->ssi_tid);
7526     tinfo->ssi_band = tswap32(info->ssi_band);
7527     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7528     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7529     tinfo->ssi_status = tswap32(info->ssi_status);
7530     tinfo->ssi_int = tswap32(info->ssi_int);
7531     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7532     tinfo->ssi_utime = tswap64(info->ssi_utime);
7533     tinfo->ssi_stime = tswap64(info->ssi_stime);
7534     tinfo->ssi_addr = tswap64(info->ssi_addr);
7535 }
7536 
7537 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7538 {
7539     int i;
7540 
7541     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7542         host_to_target_signalfd_siginfo(buf + i, buf + i);
7543     }
7544 
7545     return len;
7546 }
7547 
7548 static TargetFdTrans target_signalfd_trans = {
7549     .host_to_target_data = host_to_target_data_signalfd,
7550 };
7551 
7552 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7553 {
7554     int host_flags;
7555     target_sigset_t *target_mask;
7556     sigset_t host_mask;
7557     abi_long ret;
7558 
7559     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7560         return -TARGET_EINVAL;
7561     }
7562     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7563         return -TARGET_EFAULT;
7564     }
7565 
7566     target_to_host_sigset(&host_mask, target_mask);
7567 
7568     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7569 
7570     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7571     if (ret >= 0) {
7572         fd_trans_register(ret, &target_signalfd_trans);
7573     }
7574 
7575     unlock_user_struct(target_mask, mask, 0);
7576 
7577     return ret;
7578 }
7579 #endif
7580 
7581 /* Map host to target signal numbers for the wait family of syscalls.
7582    Assume all other status bits are the same.  */
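/* (In the status word, WIFSIGNALED puts the terminating signal in bits 0-6
 * with the core-dump flag in bit 7, while WIFSTOPPED puts 0x7f in the low
 * byte and the stopping signal in bits 8-15, so only the signal number
 * fields need remapping.)
 */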
7583 int host_to_target_waitstatus(int status)
7584 {
7585     if (WIFSIGNALED(status)) {
7586         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7587     }
7588     if (WIFSTOPPED(status)) {
7589         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7590                | (status & 0xff);
7591     }
7592     return status;
7593 }
7594 
7595 static int open_self_cmdline(void *cpu_env, int fd)
7596 {
7597     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7598     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7599     int i;
7600 
7601     for (i = 0; i < bprm->argc; i++) {
7602         size_t len = strlen(bprm->argv[i]) + 1;
7603 
7604         if (write(fd, bprm->argv[i], len) != len) {
7605             return -1;
7606         }
7607     }
7608 
7609     return 0;
7610 }
7611 
7612 static int open_self_maps(void *cpu_env, int fd)
7613 {
7614     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7615     TaskState *ts = cpu->opaque;
7616     FILE *fp;
7617     char *line = NULL;
7618     size_t len = 0;
7619     ssize_t read;
7620 
7621     fp = fopen("/proc/self/maps", "r");
7622     if (fp == NULL) {
7623         return -1;
7624     }
7625 
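    /* Walk the host's own /proc/self/maps and re-emit, with guest addresses,
     * only those ranges that translate back into the guest address space and
     * whose guest page flags are consistent across the whole range.
     */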
7626     while ((read = getline(&line, &len, fp)) != -1) {
7627         int fields, dev_maj, dev_min, inode;
7628         uint64_t min, max, offset;
7629         char flag_r, flag_w, flag_x, flag_p;
7630         char path[512] = "";
7631         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7632                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7633                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7634 
7635         if ((fields < 10) || (fields > 11)) {
7636             continue;
7637         }
7638         if (h2g_valid(min)) {
7639             int flags = page_get_flags(h2g(min));
7640             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7641             if (page_check_range(h2g(min), max - min, flags) == -1) {
7642                 continue;
7643             }
7644             if (h2g(min) == ts->info->stack_limit) {
7645                 pstrcpy(path, sizeof(path), "      [stack]");
7646             }
7647             dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7648                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7649                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7650                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7651                     path[0] ? "         " : "", path);
7652         }
7653     }
7654 
7655     free(line);
7656     fclose(fp);
7657 
7658     return 0;
7659 }
7660 
7661 static int open_self_stat(void *cpu_env, int fd)
7662 {
7663     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7664     TaskState *ts = cpu->opaque;
7665     abi_ulong start_stack = ts->info->start_stack;
7666     int i;
7667 
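    /* Emit a minimal 44-field stat line: only the pid (field 1), the command
     * name (field 2) and the stack start address (field 28) carry real
     * values; every other field is written as 0.
     */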
7668     for (i = 0; i < 44; i++) {
7669       char buf[128];
7670       int len;
7671       uint64_t val = 0;
7672 
7673       if (i == 0) {
7674         /* pid */
7675         val = getpid();
7676         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7677       } else if (i == 1) {
7678         /* app name */
7679         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7680       } else if (i == 27) {
7681         /* stack bottom */
7682         val = start_stack;
7683         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7684       } else {
7685         /* for the rest, there is MasterCard */
7686         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7687       }
7688 
7689       len = strlen(buf);
7690       if (write(fd, buf, len) != len) {
7691           return -1;
7692       }
7693     }
7694 
7695     return 0;
7696 }
7697 
7698 static int open_self_auxv(void *cpu_env, int fd)
7699 {
7700     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7701     TaskState *ts = cpu->opaque;
7702     abi_ulong auxv = ts->info->saved_auxv;
7703     abi_ulong len = ts->info->auxv_len;
7704     char *ptr;
7705 
7706     /*
7707      * The auxiliary vector is stored on the target process stack;
7708      * read the whole vector in and copy it to the file.
7709      */
7710     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7711     if (ptr != NULL) {
7712         while (len > 0) {
7713             ssize_t r;
7714             r = write(fd, ptr, len);
7715             if (r <= 0) {
7716                 break;
7717             }
7718             len -= r;
7719             ptr += r;
7720         }
7721         lseek(fd, 0, SEEK_SET);
7722         unlock_user(ptr, auxv, len);
7723     }
7724 
7725     return 0;
7726 }
7727 
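/* Return non-zero if filename refers to /proc/self/<entry> or
 * /proc/<our-pid>/<entry>, so the caller can redirect the access to one of
 * the fake implementations above.
 */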
7728 static int is_proc_myself(const char *filename, const char *entry)
7729 {
7730     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7731         filename += strlen("/proc/");
7732         if (!strncmp(filename, "self/", strlen("self/"))) {
7733             filename += strlen("self/");
7734         } else if (*filename >= '1' && *filename <= '9') {
7735             char myself[80];
7736             snprintf(myself, sizeof(myself), "%d/", getpid());
7737             if (!strncmp(filename, myself, strlen(myself))) {
7738                 filename += strlen(myself);
7739             } else {
7740                 return 0;
7741             }
7742         } else {
7743             return 0;
7744         }
7745         if (!strcmp(filename, entry)) {
7746             return 1;
7747         }
7748     }
7749     return 0;
7750 }
7751 
7752 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7753 static int is_proc(const char *filename, const char *entry)
7754 {
7755     return strcmp(filename, entry) == 0;
7756 }
7757 
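/*
 * When host and guest endianness differ, /proc/net/route cannot be passed
 * through verbatim: the destination, gateway and mask words are byte-swapped
 * for the guest, while the remaining columns are copied unchanged.
 */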
7758 static int open_net_route(void *cpu_env, int fd)
7759 {
7760     FILE *fp;
7761     char *line = NULL;
7762     size_t len = 0;
7763     ssize_t read;
7764 
7765     fp = fopen("/proc/net/route", "r");
7766     if (fp == NULL) {
7767         return -1;
7768     }
7769 
7770     /* read header */
7771 
7772     read = getline(&line, &len, fp);
7773     dprintf(fd, "%s", line);
7774 
7775     /* read routes */
7776 
7777     while ((read = getline(&line, &len, fp)) != -1) {
7778         char iface[16];
7779         uint32_t dest, gw, mask;
7780         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7781         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7782                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7783                      &mask, &mtu, &window, &irtt);
7784         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7785                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7786                 metric, tswap32(mask), mtu, window, irtt);
7787     }
7788 
7789     free(line);
7790     fclose(fp);
7791 
7792     return 0;
7793 }
7794 #endif
7795 
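/*
 * Opens of a few /proc entries are intercepted here: the matching fill()
 * callback writes synthetic, target-format contents into an unlinked
 * temporary file and that descriptor is handed back to the guest.  Paths
 * not in the fakes[] table below fall through to a real safe_openat().
 */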
7796 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7797 {
7798     struct fake_open {
7799         const char *filename;
7800         int (*fill)(void *cpu_env, int fd);
7801         int (*cmp)(const char *s1, const char *s2);
7802     };
7803     const struct fake_open *fake_open;
7804     static const struct fake_open fakes[] = {
7805         { "maps", open_self_maps, is_proc_myself },
7806         { "stat", open_self_stat, is_proc_myself },
7807         { "auxv", open_self_auxv, is_proc_myself },
7808         { "cmdline", open_self_cmdline, is_proc_myself },
7809 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7810         { "/proc/net/route", open_net_route, is_proc },
7811 #endif
7812         { NULL, NULL, NULL }
7813     };
7814 
7815     if (is_proc_myself(pathname, "exe")) {
7816         int execfd = qemu_getauxval(AT_EXECFD);
7817         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7818     }
7819 
7820     for (fake_open = fakes; fake_open->filename; fake_open++) {
7821         if (fake_open->cmp(pathname, fake_open->filename)) {
7822             break;
7823         }
7824     }
7825 
7826     if (fake_open->filename) {
7827         const char *tmpdir;
7828         char filename[PATH_MAX];
7829         int fd, r;
7830 
7831         /* create a temporary file to hold the synthesized contents */
7832         tmpdir = getenv("TMPDIR");
7833         if (!tmpdir)
7834             tmpdir = "/tmp";
7835         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7836         fd = mkstemp(filename);
7837         if (fd < 0) {
7838             return fd;
7839         }
7840         unlink(filename);
7841 
7842         if ((r = fake_open->fill(cpu_env, fd))) {
7843             int e = errno;
7844             close(fd);
7845             errno = e;
7846             return r;
7847         }
7848         lseek(fd, 0, SEEK_SET);
7849 
7850         return fd;
7851     }
7852 
7853     return safe_openat(dirfd, path(pathname), flags, mode);
7854 }
7855 
7856 #define TIMER_MAGIC 0x0caf0000
7857 #define TIMER_MAGIC_MASK 0xffff0000
7858 
7859 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
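/* Illustrative example (not part of the original source): a guest-visible
 * timer ID of 0x0caf0003 passes the magic check and decodes to index 3 in
 * g_posix_timers[]; any value without the 0x0caf0000 tag is rejected with
 * -TARGET_EINVAL. */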
7860 static target_timer_t get_timer_id(abi_long arg)
7861 {
7862     target_timer_t timerid = arg;
7863 
7864     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7865         return -TARGET_EINVAL;
7866     }
7867 
7868     timerid &= 0xffff;
7869 
7870     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7871         return -TARGET_EINVAL;
7872     }
7873 
7874     return timerid;
7875 }
7876 
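/*
 * eventfd transfers consist of one or more 8-byte counters; swap each
 * 64-bit value between host and guest byte order.  The conversion is its
 * own inverse, so the same helper serves both directions.
 */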
7877 static abi_long swap_data_eventfd(void *buf, size_t len)
7878 {
7879     uint64_t *counter = buf;
7880     int i;
7881 
7882     if (len < sizeof(uint64_t)) {
7883         return -EINVAL;
7884     }
7885 
7886     for (i = 0; i < len; i += sizeof(uint64_t)) {
7887         *counter = tswap64(*counter);
7888         counter++;
7889     }
7890 
7891     return len;
7892 }
7893 
7894 static TargetFdTrans target_eventfd_trans = {
7895     .host_to_target_data = swap_data_eventfd,
7896     .target_to_host_data = swap_data_eventfd,
7897 };
7898 
7899 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7900     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7901      defined(__NR_inotify_init1))
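/*
 * An inotify read returns a sequence of variable-length records: a fixed
 * struct inotify_event header followed by ev->len bytes of name.  Only the
 * header words need byte-swapping; the name bytes are copied as-is.
 */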
7902 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7903 {
7904     struct inotify_event *ev;
7905     int i;
7906     uint32_t name_len;
7907 
7908     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7909         ev = (struct inotify_event *)((char *)buf + i);
7910         name_len = ev->len;
7911 
7912         ev->wd = tswap32(ev->wd);
7913         ev->mask = tswap32(ev->mask);
7914         ev->cookie = tswap32(ev->cookie);
7915         ev->len = tswap32(name_len);
7916     }
7917 
7918     return len;
7919 }
7920 
7921 static TargetFdTrans target_inotify_trans = {
7922     .host_to_target_data = host_to_target_data_inotify,
7923 };
7924 #endif
7925 
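/*
 * Convert a guest CPU affinity mask (an array of abi_ulong in guest byte
 * order) into the host's unsigned long bitmap.  Bits are copied one at a
 * time so differing word sizes between host and guest do not matter.
 */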
7926 static int target_to_host_cpu_mask(unsigned long *host_mask,
7927                                    size_t host_size,
7928                                    abi_ulong target_addr,
7929                                    size_t target_size)
7930 {
7931     unsigned target_bits = sizeof(abi_ulong) * 8;
7932     unsigned host_bits = sizeof(*host_mask) * 8;
7933     abi_ulong *target_mask;
7934     unsigned i, j;
7935 
7936     assert(host_size >= target_size);
7937 
7938     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7939     if (!target_mask) {
7940         return -TARGET_EFAULT;
7941     }
7942     memset(host_mask, 0, host_size);
7943 
7944     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7945         unsigned bit = i * target_bits;
7946         abi_ulong val;
7947 
7948         __get_user(val, &target_mask[i]);
7949         for (j = 0; j < target_bits; j++, bit++) {
7950             if (val & (1UL << j)) {
7951                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7952             }
7953         }
7954     }
7955 
7956     unlock_user(target_mask, target_addr, 0);
7957     return 0;
7958 }
7959 
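/* Inverse of the above: repack the host affinity bitmap into guest
 * abi_ulong words, e.g. for returning a sched_getaffinity() result. */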
7960 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7961                                    size_t host_size,
7962                                    abi_ulong target_addr,
7963                                    size_t target_size)
7964 {
7965     unsigned target_bits = sizeof(abi_ulong) * 8;
7966     unsigned host_bits = sizeof(*host_mask) * 8;
7967     abi_ulong *target_mask;
7968     unsigned i, j;
7969 
7970     assert(host_size >= target_size);
7971 
7972     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7973     if (!target_mask) {
7974         return -TARGET_EFAULT;
7975     }
7976 
7977     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7978         unsigned bit = i * target_bits;
7979         abi_ulong val = 0;
7980 
7981         for (j = 0; j < target_bits; j++, bit++) {
7982             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7983                 val |= 1UL << j;
7984             }
7985         }
7986         __put_user(val, &target_mask[i]);
7987     }
7988 
7989     unlock_user(target_mask, target_addr, target_size);
7990     return 0;
7991 }
7992 
7993 /* do_syscall() should always have a single exit point at the end so
7994    that actions, such as logging of syscall results, can be performed.
7995    All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7996 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7997                     abi_long arg2, abi_long arg3, abi_long arg4,
7998                     abi_long arg5, abi_long arg6, abi_long arg7,
7999                     abi_long arg8)
8000 {
8001     CPUState *cpu = ENV_GET_CPU(cpu_env);
8002     abi_long ret;
8003 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8004     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8005     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8006     struct stat st;
8007 #endif
8008 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8009     || defined(TARGET_NR_fstatfs)
8010     struct statfs stfs;
8011 #endif
8012     void *p;
8013 
8014 #if defined(DEBUG_ERESTARTSYS)
8015     /* Debug-only code for exercising the syscall-restart code paths
8016      * in the per-architecture cpu main loops: restart every syscall
8017      * the guest makes once before letting it through.
8018      */
8019     {
8020         static int flag;
8021 
8022         flag = !flag;
8023         if (flag) {
8024             return -TARGET_ERESTARTSYS;
8025         }
8026     }
8027 #endif
8028 
8029 #ifdef DEBUG
8030     gemu_log("syscall %d", num);
8031 #endif
8032     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
8033     if(do_strace)
8034         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
8035 
8036     switch(num) {
8037     case TARGET_NR_exit:
8038         /* In old applications this may be used to implement _exit(2).
8039            However in threaded applications it is used for thread termination,
8040            and _exit_group is used for application termination.
8041            Do thread termination if we have more than one thread.  */
8042 
8043         if (block_signals()) {
8044             ret = -TARGET_ERESTARTSYS;
8045             break;
8046         }
8047 
8048         cpu_list_lock();
8049 
8050         if (CPU_NEXT(first_cpu)) {
8051             TaskState *ts;
8052 
8053             /* Remove the CPU from the list.  */
8054             QTAILQ_REMOVE(&cpus, cpu, node);
8055 
8056             cpu_list_unlock();
8057 
8058             ts = cpu->opaque;
8059             if (ts->child_tidptr) {
8060                 put_user_u32(0, ts->child_tidptr);
8061                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8062                           NULL, NULL, 0);
8063             }
8064             thread_cpu = NULL;
8065             object_unref(OBJECT(cpu));
8066             g_free(ts);
8067             rcu_unregister_thread();
8068             pthread_exit(NULL);
8069         }
8070 
8071         cpu_list_unlock();
8072         preexit_cleanup(cpu_env, arg1);
8073         _exit(arg1);
8074         ret = 0; /* avoid warning */
8075         break;
8076     case TARGET_NR_read:
8077         if (arg3 == 0)
8078             ret = 0;
8079         else {
8080             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8081                 goto efault;
8082             ret = get_errno(safe_read(arg1, p, arg3));
8083             if (ret >= 0 &&
8084                 fd_trans_host_to_target_data(arg1)) {
8085                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8086             }
8087             unlock_user(p, arg2, ret);
8088         }
8089         break;
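    /*
     * For writes through a translated fd (e.g. the eventfd translator defined
     * earlier in this file), the guest data is copied into a scratch buffer
     * first so that the in-place byte swap never modifies guest memory.
     */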
8090     case TARGET_NR_write:
8091         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8092             goto efault;
8093         if (fd_trans_target_to_host_data(arg1)) {
8094             void *copy = g_malloc(arg3);
8095             memcpy(copy, p, arg3);
8096             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8097             if (ret >= 0) {
8098                 ret = get_errno(safe_write(arg1, copy, ret));
8099             }
8100             g_free(copy);
8101         } else {
8102             ret = get_errno(safe_write(arg1, p, arg3));
8103         }
8104         unlock_user(p, arg2, 0);
8105         break;
8106 #ifdef TARGET_NR_open
8107     case TARGET_NR_open:
8108         if (!(p = lock_user_string(arg1)))
8109             goto efault;
8110         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8111                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8112                                   arg3));
8113         fd_trans_unregister(ret);
8114         unlock_user(p, arg1, 0);
8115         break;
8116 #endif
8117     case TARGET_NR_openat:
8118         if (!(p = lock_user_string(arg2)))
8119             goto efault;
8120         ret = get_errno(do_openat(cpu_env, arg1, p,
8121                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8122                                   arg4));
8123         fd_trans_unregister(ret);
8124         unlock_user(p, arg2, 0);
8125         break;
8126 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8127     case TARGET_NR_name_to_handle_at:
8128         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8129         break;
8130 #endif
8131 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8132     case TARGET_NR_open_by_handle_at:
8133         ret = do_open_by_handle_at(arg1, arg2, arg3);
8134         fd_trans_unregister(ret);
8135         break;
8136 #endif
8137     case TARGET_NR_close:
8138         fd_trans_unregister(arg1);
8139         ret = get_errno(close(arg1));
8140         break;
8141     case TARGET_NR_brk:
8142         ret = do_brk(arg1);
8143         break;
8144 #ifdef TARGET_NR_fork
8145     case TARGET_NR_fork:
8146         ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8147         break;
8148 #endif
8149 #ifdef TARGET_NR_waitpid
8150     case TARGET_NR_waitpid:
8151         {
8152             int status;
8153             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8154             if (!is_error(ret) && arg2 && ret
8155                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8156                 goto efault;
8157         }
8158         break;
8159 #endif
8160 #ifdef TARGET_NR_waitid
8161     case TARGET_NR_waitid:
8162         {
8163             siginfo_t info;
8164             info.si_pid = 0;
8165             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8166             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8167                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8168                     goto efault;
8169                 host_to_target_siginfo(p, &info);
8170                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8171             }
8172         }
8173         break;
8174 #endif
8175 #ifdef TARGET_NR_creat /* not on alpha */
8176     case TARGET_NR_creat:
8177         if (!(p = lock_user_string(arg1)))
8178             goto efault;
8179         ret = get_errno(creat(p, arg2));
8180         fd_trans_unregister(ret);
8181         unlock_user(p, arg1, 0);
8182         break;
8183 #endif
8184 #ifdef TARGET_NR_link
8185     case TARGET_NR_link:
8186         {
8187             void * p2;
8188             p = lock_user_string(arg1);
8189             p2 = lock_user_string(arg2);
8190             if (!p || !p2)
8191                 ret = -TARGET_EFAULT;
8192             else
8193                 ret = get_errno(link(p, p2));
8194             unlock_user(p2, arg2, 0);
8195             unlock_user(p, arg1, 0);
8196         }
8197         break;
8198 #endif
8199 #if defined(TARGET_NR_linkat)
8200     case TARGET_NR_linkat:
8201         {
8202             void * p2 = NULL;
8203             if (!arg2 || !arg4)
8204                 goto efault;
8205             p  = lock_user_string(arg2);
8206             p2 = lock_user_string(arg4);
8207             if (!p || !p2)
8208                 ret = -TARGET_EFAULT;
8209             else
8210                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8211             unlock_user(p, arg2, 0);
8212             unlock_user(p2, arg4, 0);
8213         }
8214         break;
8215 #endif
8216 #ifdef TARGET_NR_unlink
8217     case TARGET_NR_unlink:
8218         if (!(p = lock_user_string(arg1)))
8219             goto efault;
8220         ret = get_errno(unlink(p));
8221         unlock_user(p, arg1, 0);
8222         break;
8223 #endif
8224 #if defined(TARGET_NR_unlinkat)
8225     case TARGET_NR_unlinkat:
8226         if (!(p = lock_user_string(arg2)))
8227             goto efault;
8228         ret = get_errno(unlinkat(arg1, p, arg3));
8229         unlock_user(p, arg2, 0);
8230         break;
8231 #endif
8232     case TARGET_NR_execve:
8233         {
8234             char **argp, **envp;
8235             int argc, envc;
8236             abi_ulong gp;
8237             abi_ulong guest_argp;
8238             abi_ulong guest_envp;
8239             abi_ulong addr;
8240             char **q;
8241             int total_size = 0;
8242 
8243             argc = 0;
8244             guest_argp = arg2;
8245             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8246                 if (get_user_ual(addr, gp))
8247                     goto efault;
8248                 if (!addr)
8249                     break;
8250                 argc++;
8251             }
8252             envc = 0;
8253             guest_envp = arg3;
8254             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8255                 if (get_user_ual(addr, gp))
8256                     goto efault;
8257                 if (!addr)
8258                     break;
8259                 envc++;
8260             }
8261 
8262             argp = g_new0(char *, argc + 1);
8263             envp = g_new0(char *, envc + 1);
8264 
8265             for (gp = guest_argp, q = argp; gp;
8266                   gp += sizeof(abi_ulong), q++) {
8267                 if (get_user_ual(addr, gp))
8268                     goto execve_efault;
8269                 if (!addr)
8270                     break;
8271                 if (!(*q = lock_user_string(addr)))
8272                     goto execve_efault;
8273                 total_size += strlen(*q) + 1;
8274             }
8275             *q = NULL;
8276 
8277             for (gp = guest_envp, q = envp; gp;
8278                   gp += sizeof(abi_ulong), q++) {
8279                 if (get_user_ual(addr, gp))
8280                     goto execve_efault;
8281                 if (!addr)
8282                     break;
8283                 if (!(*q = lock_user_string(addr)))
8284                     goto execve_efault;
8285                 total_size += strlen(*q) + 1;
8286             }
8287             *q = NULL;
8288 
8289             if (!(p = lock_user_string(arg1)))
8290                 goto execve_efault;
8291             /* Although execve() is not an interruptible syscall, it is
8292              * a special case where we must use the safe_syscall wrapper:
8293              * if we allow a signal to happen before we make the host
8294              * syscall then we will 'lose' it, because at the point of
8295              * execve the process leaves QEMU's control. So we use the
8296              * safe syscall wrapper to ensure that we either take the
8297              * signal as a guest signal, or else it does not happen
8298              * before the execve completes and makes it the other
8299              * program's problem.
8300              */
8301             ret = get_errno(safe_execve(p, argp, envp));
8302             unlock_user(p, arg1, 0);
8303 
8304             goto execve_end;
8305 
8306         execve_efault:
8307             ret = -TARGET_EFAULT;
8308 
8309         execve_end:
8310             for (gp = guest_argp, q = argp; *q;
8311                   gp += sizeof(abi_ulong), q++) {
8312                 if (get_user_ual(addr, gp)
8313                     || !addr)
8314                     break;
8315                 unlock_user(*q, addr, 0);
8316             }
8317             for (gp = guest_envp, q = envp; *q;
8318                   gp += sizeof(abi_ulong), q++) {
8319                 if (get_user_ual(addr, gp)
8320                     || !addr)
8321                     break;
8322                 unlock_user(*q, addr, 0);
8323             }
8324 
8325             g_free(argp);
8326             g_free(envp);
8327         }
8328         break;
8329     case TARGET_NR_chdir:
8330         if (!(p = lock_user_string(arg1)))
8331             goto efault;
8332         ret = get_errno(chdir(p));
8333         unlock_user(p, arg1, 0);
8334         break;
8335 #ifdef TARGET_NR_time
8336     case TARGET_NR_time:
8337         {
8338             time_t host_time;
8339             ret = get_errno(time(&host_time));
8340             if (!is_error(ret)
8341                 && arg1
8342                 && put_user_sal(host_time, arg1))
8343                 goto efault;
8344         }
8345         break;
8346 #endif
8347 #ifdef TARGET_NR_mknod
8348     case TARGET_NR_mknod:
8349         if (!(p = lock_user_string(arg1)))
8350             goto efault;
8351         ret = get_errno(mknod(p, arg2, arg3));
8352         unlock_user(p, arg1, 0);
8353         break;
8354 #endif
8355 #if defined(TARGET_NR_mknodat)
8356     case TARGET_NR_mknodat:
8357         if (!(p = lock_user_string(arg2)))
8358             goto efault;
8359         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8360         unlock_user(p, arg2, 0);
8361         break;
8362 #endif
8363 #ifdef TARGET_NR_chmod
8364     case TARGET_NR_chmod:
8365         if (!(p = lock_user_string(arg1)))
8366             goto efault;
8367         ret = get_errno(chmod(p, arg2));
8368         unlock_user(p, arg1, 0);
8369         break;
8370 #endif
8371 #ifdef TARGET_NR_break
8372     case TARGET_NR_break:
8373         goto unimplemented;
8374 #endif
8375 #ifdef TARGET_NR_oldstat
8376     case TARGET_NR_oldstat:
8377         goto unimplemented;
8378 #endif
8379 #ifdef TARGET_NR_lseek
8380     case TARGET_NR_lseek:
8381         ret = get_errno(lseek(arg1, arg2, arg3));
8382         break;
8383 #endif
8384 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8385     /* Alpha specific */
8386     case TARGET_NR_getxpid:
8387         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8388         ret = get_errno(getpid());
8389         break;
8390 #endif
8391 #ifdef TARGET_NR_getpid
8392     case TARGET_NR_getpid:
8393         ret = get_errno(getpid());
8394         break;
8395 #endif
8396     case TARGET_NR_mount:
8397         {
8398             /* need to look at the data field */
8399             void *p2, *p3;
8400 
8401             if (arg1) {
8402                 p = lock_user_string(arg1);
8403                 if (!p) {
8404                     goto efault;
8405                 }
8406             } else {
8407                 p = NULL;
8408             }
8409 
8410             p2 = lock_user_string(arg2);
8411             if (!p2) {
8412                 if (arg1) {
8413                     unlock_user(p, arg1, 0);
8414                 }
8415                 goto efault;
8416             }
8417 
8418             if (arg3) {
8419                 p3 = lock_user_string(arg3);
8420                 if (!p3) {
8421                     if (arg1) {
8422                         unlock_user(p, arg1, 0);
8423                     }
8424                     unlock_user(p2, arg2, 0);
8425                     goto efault;
8426                 }
8427             } else {
8428                 p3 = NULL;
8429             }
8430 
8431             /* FIXME - arg5 should be locked, but it isn't clear how to
8432              * do that since it's not guaranteed to be a NULL-terminated
8433              * string.
8434              */
8435             if (!arg5) {
8436                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8437             } else {
8438                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8439             }
8440             ret = get_errno(ret);
8441 
8442             if (arg1) {
8443                 unlock_user(p, arg1, 0);
8444             }
8445             unlock_user(p2, arg2, 0);
8446             if (arg3) {
8447                 unlock_user(p3, arg3, 0);
8448             }
8449         }
8450         break;
8451 #ifdef TARGET_NR_umount
8452     case TARGET_NR_umount:
8453         if (!(p = lock_user_string(arg1)))
8454             goto efault;
8455         ret = get_errno(umount(p));
8456         unlock_user(p, arg1, 0);
8457         break;
8458 #endif
8459 #ifdef TARGET_NR_stime /* not on alpha */
8460     case TARGET_NR_stime:
8461         {
8462             time_t host_time;
8463             if (get_user_sal(host_time, arg1))
8464                 goto efault;
8465             ret = get_errno(stime(&host_time));
8466         }
8467         break;
8468 #endif
8469     case TARGET_NR_ptrace:
8470         goto unimplemented;
8471 #ifdef TARGET_NR_alarm /* not on alpha */
8472     case TARGET_NR_alarm:
8473         ret = alarm(arg1);
8474         break;
8475 #endif
8476 #ifdef TARGET_NR_oldfstat
8477     case TARGET_NR_oldfstat:
8478         goto unimplemented;
8479 #endif
8480 #ifdef TARGET_NR_pause /* not on alpha */
8481     case TARGET_NR_pause:
8482         if (!block_signals()) {
8483             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8484         }
8485         ret = -TARGET_EINTR;
8486         break;
8487 #endif
8488 #ifdef TARGET_NR_utime
8489     case TARGET_NR_utime:
8490         {
8491             struct utimbuf tbuf, *host_tbuf;
8492             struct target_utimbuf *target_tbuf;
8493             if (arg2) {
8494                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8495                     goto efault;
8496                 tbuf.actime = tswapal(target_tbuf->actime);
8497                 tbuf.modtime = tswapal(target_tbuf->modtime);
8498                 unlock_user_struct(target_tbuf, arg2, 0);
8499                 host_tbuf = &tbuf;
8500             } else {
8501                 host_tbuf = NULL;
8502             }
8503             if (!(p = lock_user_string(arg1)))
8504                 goto efault;
8505             ret = get_errno(utime(p, host_tbuf));
8506             unlock_user(p, arg1, 0);
8507         }
8508         break;
8509 #endif
8510 #ifdef TARGET_NR_utimes
8511     case TARGET_NR_utimes:
8512         {
8513             struct timeval *tvp, tv[2];
8514             if (arg2) {
8515                 if (copy_from_user_timeval(&tv[0], arg2)
8516                     || copy_from_user_timeval(&tv[1],
8517                                               arg2 + sizeof(struct target_timeval)))
8518                     goto efault;
8519                 tvp = tv;
8520             } else {
8521                 tvp = NULL;
8522             }
8523             if (!(p = lock_user_string(arg1)))
8524                 goto efault;
8525             ret = get_errno(utimes(p, tvp));
8526             unlock_user(p, arg1, 0);
8527         }
8528         break;
8529 #endif
8530 #if defined(TARGET_NR_futimesat)
8531     case TARGET_NR_futimesat:
8532         {
8533             struct timeval *tvp, tv[2];
8534             if (arg3) {
8535                 if (copy_from_user_timeval(&tv[0], arg3)
8536                     || copy_from_user_timeval(&tv[1],
8537                                               arg3 + sizeof(struct target_timeval)))
8538                     goto efault;
8539                 tvp = tv;
8540             } else {
8541                 tvp = NULL;
8542             }
8543             if (!(p = lock_user_string(arg2)))
8544                 goto efault;
8545             ret = get_errno(futimesat(arg1, path(p), tvp));
8546             unlock_user(p, arg2, 0);
8547         }
8548         break;
8549 #endif
8550 #ifdef TARGET_NR_stty
8551     case TARGET_NR_stty:
8552         goto unimplemented;
8553 #endif
8554 #ifdef TARGET_NR_gtty
8555     case TARGET_NR_gtty:
8556         goto unimplemented;
8557 #endif
8558 #ifdef TARGET_NR_access
8559     case TARGET_NR_access:
8560         if (!(p = lock_user_string(arg1)))
8561             goto efault;
8562         ret = get_errno(access(path(p), arg2));
8563         unlock_user(p, arg1, 0);
8564         break;
8565 #endif
8566 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8567     case TARGET_NR_faccessat:
8568         if (!(p = lock_user_string(arg2)))
8569             goto efault;
8570         ret = get_errno(faccessat(arg1, p, arg3, 0));
8571         unlock_user(p, arg2, 0);
8572         break;
8573 #endif
8574 #ifdef TARGET_NR_nice /* not on alpha */
8575     case TARGET_NR_nice:
8576         ret = get_errno(nice(arg1));
8577         break;
8578 #endif
8579 #ifdef TARGET_NR_ftime
8580     case TARGET_NR_ftime:
8581         goto unimplemented;
8582 #endif
8583     case TARGET_NR_sync:
8584         sync();
8585         ret = 0;
8586         break;
8587 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8588     case TARGET_NR_syncfs:
8589         ret = get_errno(syncfs(arg1));
8590         break;
8591 #endif
8592     case TARGET_NR_kill:
8593         ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8594         break;
8595 #ifdef TARGET_NR_rename
8596     case TARGET_NR_rename:
8597         {
8598             void *p2;
8599             p = lock_user_string(arg1);
8600             p2 = lock_user_string(arg2);
8601             if (!p || !p2)
8602                 ret = -TARGET_EFAULT;
8603             else
8604                 ret = get_errno(rename(p, p2));
8605             unlock_user(p2, arg2, 0);
8606             unlock_user(p, arg1, 0);
8607         }
8608         break;
8609 #endif
8610 #if defined(TARGET_NR_renameat)
8611     case TARGET_NR_renameat:
8612         {
8613             void *p2;
8614             p  = lock_user_string(arg2);
8615             p2 = lock_user_string(arg4);
8616             if (!p || !p2)
8617                 ret = -TARGET_EFAULT;
8618             else
8619                 ret = get_errno(renameat(arg1, p, arg3, p2));
8620             unlock_user(p2, arg4, 0);
8621             unlock_user(p, arg2, 0);
8622         }
8623         break;
8624 #endif
8625 #if defined(TARGET_NR_renameat2)
8626     case TARGET_NR_renameat2:
8627         {
8628             void *p2;
8629             p  = lock_user_string(arg2);
8630             p2 = lock_user_string(arg4);
8631             if (!p || !p2) {
8632                 ret = -TARGET_EFAULT;
8633             } else {
8634                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8635             }
8636             unlock_user(p2, arg4, 0);
8637             unlock_user(p, arg2, 0);
8638         }
8639         break;
8640 #endif
8641 #ifdef TARGET_NR_mkdir
8642     case TARGET_NR_mkdir:
8643         if (!(p = lock_user_string(arg1)))
8644             goto efault;
8645         ret = get_errno(mkdir(p, arg2));
8646         unlock_user(p, arg1, 0);
8647         break;
8648 #endif
8649 #if defined(TARGET_NR_mkdirat)
8650     case TARGET_NR_mkdirat:
8651         if (!(p = lock_user_string(arg2)))
8652             goto efault;
8653         ret = get_errno(mkdirat(arg1, p, arg3));
8654         unlock_user(p, arg2, 0);
8655         break;
8656 #endif
8657 #ifdef TARGET_NR_rmdir
8658     case TARGET_NR_rmdir:
8659         if (!(p = lock_user_string(arg1)))
8660             goto efault;
8661         ret = get_errno(rmdir(p));
8662         unlock_user(p, arg1, 0);
8663         break;
8664 #endif
8665     case TARGET_NR_dup:
8666         ret = get_errno(dup(arg1));
8667         if (ret >= 0) {
8668             fd_trans_dup(arg1, ret);
8669         }
8670         break;
8671 #ifdef TARGET_NR_pipe
8672     case TARGET_NR_pipe:
8673         ret = do_pipe(cpu_env, arg1, 0, 0);
8674         break;
8675 #endif
8676 #ifdef TARGET_NR_pipe2
8677     case TARGET_NR_pipe2:
8678         ret = do_pipe(cpu_env, arg1,
8679                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8680         break;
8681 #endif
8682     case TARGET_NR_times:
8683         {
8684             struct target_tms *tmsp;
8685             struct tms tms;
8686             ret = get_errno(times(&tms));
8687             if (arg1) {
8688                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8689                 if (!tmsp)
8690                     goto efault;
8691                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8692                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8693                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8694                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8695             }
8696             if (!is_error(ret))
8697                 ret = host_to_target_clock_t(ret);
8698         }
8699         break;
8700 #ifdef TARGET_NR_prof
8701     case TARGET_NR_prof:
8702         goto unimplemented;
8703 #endif
8704 #ifdef TARGET_NR_signal
8705     case TARGET_NR_signal:
8706         goto unimplemented;
8707 #endif
8708     case TARGET_NR_acct:
8709         if (arg1 == 0) {
8710             ret = get_errno(acct(NULL));
8711         } else {
8712             if (!(p = lock_user_string(arg1)))
8713                 goto efault;
8714             ret = get_errno(acct(path(p)));
8715             unlock_user(p, arg1, 0);
8716         }
8717         break;
8718 #ifdef TARGET_NR_umount2
8719     case TARGET_NR_umount2:
8720         if (!(p = lock_user_string(arg1)))
8721             goto efault;
8722         ret = get_errno(umount2(p, arg2));
8723         unlock_user(p, arg1, 0);
8724         break;
8725 #endif
8726 #ifdef TARGET_NR_lock
8727     case TARGET_NR_lock:
8728         goto unimplemented;
8729 #endif
8730     case TARGET_NR_ioctl:
8731         ret = do_ioctl(arg1, arg2, arg3);
8732         break;
8733 #ifdef TARGET_NR_fcntl
8734     case TARGET_NR_fcntl:
8735         ret = do_fcntl(arg1, arg2, arg3);
8736         break;
8737 #endif
8738 #ifdef TARGET_NR_mpx
8739     case TARGET_NR_mpx:
8740         goto unimplemented;
8741 #endif
8742     case TARGET_NR_setpgid:
8743         ret = get_errno(setpgid(arg1, arg2));
8744         break;
8745 #ifdef TARGET_NR_ulimit
8746     case TARGET_NR_ulimit:
8747         goto unimplemented;
8748 #endif
8749 #ifdef TARGET_NR_oldolduname
8750     case TARGET_NR_oldolduname:
8751         goto unimplemented;
8752 #endif
8753     case TARGET_NR_umask:
8754         ret = get_errno(umask(arg1));
8755         break;
8756     case TARGET_NR_chroot:
8757         if (!(p = lock_user_string(arg1)))
8758             goto efault;
8759         ret = get_errno(chroot(p));
8760         unlock_user(p, arg1, 0);
8761         break;
8762 #ifdef TARGET_NR_ustat
8763     case TARGET_NR_ustat:
8764         goto unimplemented;
8765 #endif
8766 #ifdef TARGET_NR_dup2
8767     case TARGET_NR_dup2:
8768         ret = get_errno(dup2(arg1, arg2));
8769         if (ret >= 0) {
8770             fd_trans_dup(arg1, arg2);
8771         }
8772         break;
8773 #endif
8774 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8775     case TARGET_NR_dup3:
8776     {
8777         int host_flags;
8778 
8779         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8780             return -EINVAL;
8781         }
8782         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8783         ret = get_errno(dup3(arg1, arg2, host_flags));
8784         if (ret >= 0) {
8785             fd_trans_dup(arg1, arg2);
8786         }
8787         break;
8788     }
8789 #endif
8790 #ifdef TARGET_NR_getppid /* not on alpha */
8791     case TARGET_NR_getppid:
8792         ret = get_errno(getppid());
8793         break;
8794 #endif
8795 #ifdef TARGET_NR_getpgrp
8796     case TARGET_NR_getpgrp:
8797         ret = get_errno(getpgrp());
8798         break;
8799 #endif
8800     case TARGET_NR_setsid:
8801         ret = get_errno(setsid());
8802         break;
8803 #ifdef TARGET_NR_sigaction
8804     case TARGET_NR_sigaction:
8805         {
8806 #if defined(TARGET_ALPHA)
8807             struct target_sigaction act, oact, *pact = 0;
8808             struct target_old_sigaction *old_act;
8809             if (arg2) {
8810                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8811                     goto efault;
8812                 act._sa_handler = old_act->_sa_handler;
8813                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8814                 act.sa_flags = old_act->sa_flags;
8815                 act.sa_restorer = 0;
8816                 unlock_user_struct(old_act, arg2, 0);
8817                 pact = &act;
8818             }
8819             ret = get_errno(do_sigaction(arg1, pact, &oact));
8820             if (!is_error(ret) && arg3) {
8821                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8822                     goto efault;
8823                 old_act->_sa_handler = oact._sa_handler;
8824                 old_act->sa_mask = oact.sa_mask.sig[0];
8825                 old_act->sa_flags = oact.sa_flags;
8826                 unlock_user_struct(old_act, arg3, 1);
8827             }
8828 #elif defined(TARGET_MIPS)
8829 	    struct target_sigaction act, oact, *pact, *old_act;
8830 
8831 	    if (arg2) {
8832                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8833                     goto efault;
8834 		act._sa_handler = old_act->_sa_handler;
8835 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8836 		act.sa_flags = old_act->sa_flags;
8837 		unlock_user_struct(old_act, arg2, 0);
8838 		pact = &act;
8839 	    } else {
8840 		pact = NULL;
8841 	    }
8842 
8843 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8844 
8845 	    if (!is_error(ret) && arg3) {
8846                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8847                     goto efault;
8848 		old_act->_sa_handler = oact._sa_handler;
8849 		old_act->sa_flags = oact.sa_flags;
8850 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8851 		old_act->sa_mask.sig[1] = 0;
8852 		old_act->sa_mask.sig[2] = 0;
8853 		old_act->sa_mask.sig[3] = 0;
8854 		unlock_user_struct(old_act, arg3, 1);
8855 	    }
8856 #else
8857             struct target_old_sigaction *old_act;
8858             struct target_sigaction act, oact, *pact;
8859             if (arg2) {
8860                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8861                     goto efault;
8862                 act._sa_handler = old_act->_sa_handler;
8863                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8864                 act.sa_flags = old_act->sa_flags;
8865                 act.sa_restorer = old_act->sa_restorer;
8866 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8867                 act.ka_restorer = 0;
8868 #endif
8869                 unlock_user_struct(old_act, arg2, 0);
8870                 pact = &act;
8871             } else {
8872                 pact = NULL;
8873             }
8874             ret = get_errno(do_sigaction(arg1, pact, &oact));
8875             if (!is_error(ret) && arg3) {
8876                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8877                     goto efault;
8878                 old_act->_sa_handler = oact._sa_handler;
8879                 old_act->sa_mask = oact.sa_mask.sig[0];
8880                 old_act->sa_flags = oact.sa_flags;
8881                 old_act->sa_restorer = oact.sa_restorer;
8882                 unlock_user_struct(old_act, arg3, 1);
8883             }
8884 #endif
8885         }
8886         break;
8887 #endif
8888     case TARGET_NR_rt_sigaction:
8889         {
8890 #if defined(TARGET_ALPHA)
8891             /* For Alpha and SPARC this is a 5 argument syscall, with
8892              * a 'restorer' parameter which must be copied into the
8893              * sa_restorer field of the sigaction struct.
8894              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8895              * and arg5 is the sigsetsize.
8896              * Alpha also has a separate rt_sigaction struct that it uses
8897              * here; SPARC uses the usual sigaction struct.
8898              */
8899             struct target_rt_sigaction *rt_act;
8900             struct target_sigaction act, oact, *pact = 0;
8901 
8902             if (arg4 != sizeof(target_sigset_t)) {
8903                 ret = -TARGET_EINVAL;
8904                 break;
8905             }
8906             if (arg2) {
8907                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8908                     goto efault;
8909                 act._sa_handler = rt_act->_sa_handler;
8910                 act.sa_mask = rt_act->sa_mask;
8911                 act.sa_flags = rt_act->sa_flags;
8912                 act.sa_restorer = arg5;
8913                 unlock_user_struct(rt_act, arg2, 0);
8914                 pact = &act;
8915             }
8916             ret = get_errno(do_sigaction(arg1, pact, &oact));
8917             if (!is_error(ret) && arg3) {
8918                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8919                     goto efault;
8920                 rt_act->_sa_handler = oact._sa_handler;
8921                 rt_act->sa_mask = oact.sa_mask;
8922                 rt_act->sa_flags = oact.sa_flags;
8923                 unlock_user_struct(rt_act, arg3, 1);
8924             }
8925 #else
8926 #ifdef TARGET_SPARC
8927             target_ulong restorer = arg4;
8928             target_ulong sigsetsize = arg5;
8929 #else
8930             target_ulong sigsetsize = arg4;
8931 #endif
8932             struct target_sigaction *act;
8933             struct target_sigaction *oact;
8934 
8935             if (sigsetsize != sizeof(target_sigset_t)) {
8936                 ret = -TARGET_EINVAL;
8937                 break;
8938             }
8939             if (arg2) {
8940                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8941                     goto efault;
8942                 }
8943 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8944                 act->ka_restorer = restorer;
8945 #endif
8946             } else {
8947                 act = NULL;
8948             }
8949             if (arg3) {
8950                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8951                     ret = -TARGET_EFAULT;
8952                     goto rt_sigaction_fail;
8953                 }
8954             } else
8955                 oact = NULL;
8956             ret = get_errno(do_sigaction(arg1, act, oact));
8957 	rt_sigaction_fail:
8958             if (act)
8959                 unlock_user_struct(act, arg2, 0);
8960             if (oact)
8961                 unlock_user_struct(oact, arg3, 1);
8962 #endif
8963         }
8964         break;
8965 #ifdef TARGET_NR_sgetmask /* not on alpha */
8966     case TARGET_NR_sgetmask:
8967         {
8968             sigset_t cur_set;
8969             abi_ulong target_set;
8970             ret = do_sigprocmask(0, NULL, &cur_set);
8971             if (!ret) {
8972                 host_to_target_old_sigset(&target_set, &cur_set);
8973                 ret = target_set;
8974             }
8975         }
8976         break;
8977 #endif
8978 #ifdef TARGET_NR_ssetmask /* not on alpha */
8979     case TARGET_NR_ssetmask:
8980         {
8981             sigset_t set, oset;
8982             abi_ulong target_set = arg1;
8983             target_to_host_old_sigset(&set, &target_set);
8984             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8985             if (!ret) {
8986                 host_to_target_old_sigset(&target_set, &oset);
8987                 ret = target_set;
8988             }
8989         }
8990         break;
8991 #endif
8992 #ifdef TARGET_NR_sigprocmask
8993     case TARGET_NR_sigprocmask:
8994         {
8995 #if defined(TARGET_ALPHA)
8996             sigset_t set, oldset;
8997             abi_ulong mask;
8998             int how;
8999 
9000             switch (arg1) {
9001             case TARGET_SIG_BLOCK:
9002                 how = SIG_BLOCK;
9003                 break;
9004             case TARGET_SIG_UNBLOCK:
9005                 how = SIG_UNBLOCK;
9006                 break;
9007             case TARGET_SIG_SETMASK:
9008                 how = SIG_SETMASK;
9009                 break;
9010             default:
9011                 ret = -TARGET_EINVAL;
9012                 goto fail;
9013             }
9014             mask = arg2;
9015             target_to_host_old_sigset(&set, &mask);
9016 
9017             ret = do_sigprocmask(how, &set, &oldset);
9018             if (!is_error(ret)) {
9019                 host_to_target_old_sigset(&mask, &oldset);
9020                 ret = mask;
9021                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9022             }
9023 #else
9024             sigset_t set, oldset, *set_ptr;
9025             int how;
9026 
9027             if (arg2) {
9028                 switch (arg1) {
9029                 case TARGET_SIG_BLOCK:
9030                     how = SIG_BLOCK;
9031                     break;
9032                 case TARGET_SIG_UNBLOCK:
9033                     how = SIG_UNBLOCK;
9034                     break;
9035                 case TARGET_SIG_SETMASK:
9036                     how = SIG_SETMASK;
9037                     break;
9038                 default:
9039                     ret = -TARGET_EINVAL;
9040                     goto fail;
9041                 }
9042                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9043                     goto efault;
9044                 target_to_host_old_sigset(&set, p);
9045                 unlock_user(p, arg2, 0);
9046                 set_ptr = &set;
9047             } else {
9048                 how = 0;
9049                 set_ptr = NULL;
9050             }
9051             ret = do_sigprocmask(how, set_ptr, &oldset);
9052             if (!is_error(ret) && arg3) {
9053                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9054                     goto efault;
9055                 host_to_target_old_sigset(p, &oldset);
9056                 unlock_user(p, arg3, sizeof(target_sigset_t));
9057             }
9058 #endif
9059         }
9060         break;
9061 #endif
9062     case TARGET_NR_rt_sigprocmask:
9063         {
9064             int how = arg1;
9065             sigset_t set, oldset, *set_ptr;
9066 
9067             if (arg4 != sizeof(target_sigset_t)) {
9068                 ret = -TARGET_EINVAL;
9069                 break;
9070             }
9071 
9072             if (arg2) {
9073                 switch(how) {
9074                 case TARGET_SIG_BLOCK:
9075                     how = SIG_BLOCK;
9076                     break;
9077                 case TARGET_SIG_UNBLOCK:
9078                     how = SIG_UNBLOCK;
9079                     break;
9080                 case TARGET_SIG_SETMASK:
9081                     how = SIG_SETMASK;
9082                     break;
9083                 default:
9084                     ret = -TARGET_EINVAL;
9085                     goto fail;
9086                 }
9087                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9088                     goto efault;
9089                 target_to_host_sigset(&set, p);
9090                 unlock_user(p, arg2, 0);
9091                 set_ptr = &set;
9092             } else {
9093                 how = 0;
9094                 set_ptr = NULL;
9095             }
9096             ret = do_sigprocmask(how, set_ptr, &oldset);
9097             if (!is_error(ret) && arg3) {
9098                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9099                     goto efault;
9100                 host_to_target_sigset(p, &oldset);
9101                 unlock_user(p, arg3, sizeof(target_sigset_t));
9102             }
9103         }
9104         break;
9105 #ifdef TARGET_NR_sigpending
9106     case TARGET_NR_sigpending:
9107         {
9108             sigset_t set;
9109             ret = get_errno(sigpending(&set));
9110             if (!is_error(ret)) {
9111                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9112                     goto efault;
9113                 host_to_target_old_sigset(p, &set);
9114                 unlock_user(p, arg1, sizeof(target_sigset_t));
9115             }
9116         }
9117         break;
9118 #endif
9119     case TARGET_NR_rt_sigpending:
9120         {
9121             sigset_t set;
9122 
9123             /* Yes, this check is >, not != like most. We follow the kernel's
9124              * logic and it does it like this because it implements
9125              * NR_sigpending through the same code path, and in that case
9126              * the old_sigset_t is smaller in size.
9127              */
9128             if (arg2 > sizeof(target_sigset_t)) {
9129                 ret = -TARGET_EINVAL;
9130                 break;
9131             }
9132 
9133             ret = get_errno(sigpending(&set));
9134             if (!is_error(ret)) {
9135                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9136                     goto efault;
9137                 host_to_target_sigset(p, &set);
9138                 unlock_user(p, arg1, sizeof(target_sigset_t));
9139             }
9140         }
9141         break;
9142 #ifdef TARGET_NR_sigsuspend
9143     case TARGET_NR_sigsuspend:
9144         {
9145             TaskState *ts = cpu->opaque;
9146 #if defined(TARGET_ALPHA)
9147             abi_ulong mask = arg1;
9148             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9149 #else
9150             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9151                 goto efault;
9152             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9153             unlock_user(p, arg1, 0);
9154 #endif
9155             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9156                                                SIGSET_T_SIZE));
9157             if (ret != -TARGET_ERESTARTSYS) {
9158                 ts->in_sigsuspend = 1;
9159             }
9160         }
9161         break;
9162 #endif
9163     case TARGET_NR_rt_sigsuspend:
9164         {
9165             TaskState *ts = cpu->opaque;
9166 
9167             if (arg2 != sizeof(target_sigset_t)) {
9168                 ret = -TARGET_EINVAL;
9169                 break;
9170             }
9171             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9172                 goto efault;
9173             target_to_host_sigset(&ts->sigsuspend_mask, p);
9174             unlock_user(p, arg1, 0);
9175             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9176                                                SIGSET_T_SIZE));
9177             if (ret != -TARGET_ERESTARTSYS) {
9178                 ts->in_sigsuspend = 1;
9179             }
9180         }
9181         break;
9182     case TARGET_NR_rt_sigtimedwait:
9183         {
9184             sigset_t set;
9185             struct timespec uts, *puts;
9186             siginfo_t uinfo;
9187 
9188             if (arg4 != sizeof(target_sigset_t)) {
9189                 ret = -TARGET_EINVAL;
9190                 break;
9191             }
9192 
9193             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9194                 goto efault;
9195             target_to_host_sigset(&set, p);
9196             unlock_user(p, arg1, 0);
9197             if (arg3) {
9198                 puts = &uts;
9199                 target_to_host_timespec(puts, arg3);
9200             } else {
9201                 puts = NULL;
9202             }
9203             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9204                                                  SIGSET_T_SIZE));
9205             if (!is_error(ret)) {
9206                 if (arg2) {
9207                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9208                                   0);
9209                     if (!p) {
9210                         goto efault;
9211                     }
9212                     host_to_target_siginfo(p, &uinfo);
9213                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9214                 }
9215                 ret = host_to_target_signal(ret);
9216             }
9217         }
9218         break;
9219     case TARGET_NR_rt_sigqueueinfo:
9220         {
9221             siginfo_t uinfo;
9222 
9223             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9224             if (!p) {
9225                 goto efault;
9226             }
9227             target_to_host_siginfo(&uinfo, p);
9228             unlock_user(p, arg3, 0);
9229             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9230         }
9231         break;
9232     case TARGET_NR_rt_tgsigqueueinfo:
9233         {
9234             siginfo_t uinfo;
9235 
9236             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9237             if (!p) {
9238                 goto efault;
9239             }
9240             target_to_host_siginfo(&uinfo, p);
9241             unlock_user(p, arg4, 0);
9242             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9243         }
9244         break;
9245 #ifdef TARGET_NR_sigreturn
9246     case TARGET_NR_sigreturn:
9247         if (block_signals()) {
9248             ret = -TARGET_ERESTARTSYS;
9249         } else {
9250             ret = do_sigreturn(cpu_env);
9251         }
9252         break;
9253 #endif
9254     case TARGET_NR_rt_sigreturn:
9255         if (block_signals()) {
9256             ret = -TARGET_ERESTARTSYS;
9257         } else {
9258             ret = do_rt_sigreturn(cpu_env);
9259         }
9260         break;
9261     case TARGET_NR_sethostname:
9262         if (!(p = lock_user_string(arg1)))
9263             goto efault;
9264         ret = get_errno(sethostname(p, arg2));
9265         unlock_user(p, arg1, 0);
9266         break;
9267 #ifdef TARGET_NR_setrlimit
9268     case TARGET_NR_setrlimit:
9269         {
9270             int resource = target_to_host_resource(arg1);
9271             struct target_rlimit *target_rlim;
9272             struct rlimit rlim;
9273             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9274                 goto efault;
9275             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9276             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9277             unlock_user_struct(target_rlim, arg2, 0);
9278             ret = get_errno(setrlimit(resource, &rlim));
9279         }
9280         break;
9281 #endif
9282 #ifdef TARGET_NR_getrlimit
9283     case TARGET_NR_getrlimit:
9284         {
9285             int resource = target_to_host_resource(arg1);
9286             struct target_rlimit *target_rlim;
9287             struct rlimit rlim;
9288 
9289             ret = get_errno(getrlimit(resource, &rlim));
9290             if (!is_error(ret)) {
9291                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9292                     goto efault;
9293                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9294                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9295                 unlock_user_struct(target_rlim, arg2, 1);
9296             }
9297         }
9298         break;
9299 #endif
9300     case TARGET_NR_getrusage:
9301         {
9302             struct rusage rusage;
9303             ret = get_errno(getrusage(arg1, &rusage));
9304             if (!is_error(ret)) {
9305                 ret = host_to_target_rusage(arg2, &rusage);
9306             }
9307         }
9308         break;
9309     case TARGET_NR_gettimeofday:
9310         {
9311             struct timeval tv;
9312             ret = get_errno(gettimeofday(&tv, NULL));
9313             if (!is_error(ret)) {
9314                 if (copy_to_user_timeval(arg1, &tv))
9315                     goto efault;
9316             }
9317         }
9318         break;
9319     case TARGET_NR_settimeofday:
9320         {
9321             struct timeval tv, *ptv = NULL;
9322             struct timezone tz, *ptz = NULL;
9323 
9324             if (arg1) {
9325                 if (copy_from_user_timeval(&tv, arg1)) {
9326                     goto efault;
9327                 }
9328                 ptv = &tv;
9329             }
9330 
9331             if (arg2) {
9332                 if (copy_from_user_timezone(&tz, arg2)) {
9333                     goto efault;
9334                 }
9335                 ptz = &tz;
9336             }
9337 
9338             ret = get_errno(settimeofday(ptv, ptz));
9339         }
9340         break;
9341 #if defined(TARGET_NR_select)
9342     case TARGET_NR_select:
9343 #if defined(TARGET_WANT_NI_OLD_SELECT)
9344         /* Some architectures used to have old_select here,
9345          * but now return ENOSYS for it.
9346          */
9347         ret = -TARGET_ENOSYS;
9348 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9349         ret = do_old_select(arg1);
9350 #else
9351         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9352 #endif
9353         break;
9354 #endif
9355 #ifdef TARGET_NR_pselect6
9356     case TARGET_NR_pselect6:
9357         {
9358             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9359             fd_set rfds, wfds, efds;
9360             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9361             struct timespec ts, *ts_ptr;
9362 
9363             /*
9364              * The 6th arg is actually two args smashed together,
9365              * so we cannot use the C library.
9366              */
9367             sigset_t set;
9368             struct {
9369                 sigset_t *set;
9370                 size_t size;
9371             } sig, *sig_ptr;
9372 
9373             abi_ulong arg_sigset, arg_sigsize, *arg7;
9374             target_sigset_t *target_sigset;
9375 
9376             n = arg1;
9377             rfd_addr = arg2;
9378             wfd_addr = arg3;
9379             efd_addr = arg4;
9380             ts_addr = arg5;
9381 
9382             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9383             if (ret) {
9384                 goto fail;
9385             }
9386             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9387             if (ret) {
9388                 goto fail;
9389             }
9390             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9391             if (ret) {
9392                 goto fail;
9393             }
9394 
9395             /*
9396              * This takes a timespec, and not a timeval, so we cannot
9397              * use the do_select() helper ...
9398              */
9399             if (ts_addr) {
9400                 if (target_to_host_timespec(&ts, ts_addr)) {
9401                     goto efault;
9402                 }
9403                 ts_ptr = &ts;
9404             } else {
9405                 ts_ptr = NULL;
9406             }
9407 
9408             /* Extract the two packed args for the sigset */
9409             if (arg6) {
9410                 sig_ptr = &sig;
9411                 sig.size = SIGSET_T_SIZE;
9412 
9413                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9414                 if (!arg7) {
9415                     goto efault;
9416                 }
9417                 arg_sigset = tswapal(arg7[0]);
9418                 arg_sigsize = tswapal(arg7[1]);
9419                 unlock_user(arg7, arg6, 0);
9420 
9421                 if (arg_sigset) {
9422                     sig.set = &set;
9423                     if (arg_sigsize != sizeof(*target_sigset)) {
9424                         /* Like the kernel, we enforce correct size sigsets */
9425                         ret = -TARGET_EINVAL;
9426                         goto fail;
9427                     }
9428                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9429                                               sizeof(*target_sigset), 1);
9430                     if (!target_sigset) {
9431                         goto efault;
9432                     }
9433                     target_to_host_sigset(&set, target_sigset);
9434                     unlock_user(target_sigset, arg_sigset, 0);
9435                 } else {
9436                     sig.set = NULL;
9437                 }
9438             } else {
9439                 sig_ptr = NULL;
9440             }
9441 
9442             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9443                                           ts_ptr, sig_ptr));
9444 
9445             if (!is_error(ret)) {
9446                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9447                     goto efault;
9448                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9449                     goto efault;
9450                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9451                     goto efault;
9452 
9453                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9454                     goto efault;
9455             }
9456         }
9457         break;
9458 #endif
9459 #ifdef TARGET_NR_symlink
9460     case TARGET_NR_symlink:
9461         {
9462             void *p2;
9463             p = lock_user_string(arg1);
9464             p2 = lock_user_string(arg2);
9465             if (!p || !p2)
9466                 ret = -TARGET_EFAULT;
9467             else
9468                 ret = get_errno(symlink(p, p2));
9469             unlock_user(p2, arg2, 0);
9470             unlock_user(p, arg1, 0);
9471         }
9472         break;
9473 #endif
9474 #if defined(TARGET_NR_symlinkat)
9475     case TARGET_NR_symlinkat:
9476         {
9477             void *p2;
9478             p  = lock_user_string(arg1);
9479             p2 = lock_user_string(arg3);
9480             if (!p || !p2)
9481                 ret = -TARGET_EFAULT;
9482             else
9483                 ret = get_errno(symlinkat(p, arg2, p2));
9484             unlock_user(p2, arg3, 0);
9485             unlock_user(p, arg1, 0);
9486         }
9487         break;
9488 #endif
9489 #ifdef TARGET_NR_oldlstat
9490     case TARGET_NR_oldlstat:
9491         goto unimplemented;
9492 #endif
9493 #ifdef TARGET_NR_readlink
9494     case TARGET_NR_readlink:
9495         {
9496             void *p2;
9497             p = lock_user_string(arg1);
9498             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9499             if (!p || !p2) {
9500                 ret = -TARGET_EFAULT;
9501             } else if (!arg3) {
9502                 /* Short circuit this for the magic exe check. */
9503                 ret = -TARGET_EINVAL;
9504             } else if (is_proc_myself((const char *)p, "exe")) {
9505                 char real[PATH_MAX], *temp;
9506                 temp = realpath(exec_path, real);
9507                 /* Return value is # of bytes that we wrote to the buffer. */
9508                 if (temp == NULL) {
9509                     ret = get_errno(-1);
9510                 } else {
9511                     /* Don't worry about sign mismatch as earlier mapping
9512                      * logic would have thrown a bad address error. */
9513                     ret = MIN(strlen(real), arg3);
9514                     /* We cannot NUL terminate the string. */
9515                     memcpy(p2, real, ret);
9516                 }
9517             } else {
9518                 ret = get_errno(readlink(path(p), p2, arg3));
9519             }
9520             unlock_user(p2, arg2, ret);
9521             unlock_user(p, arg1, 0);
9522         }
9523         break;
9524 #endif
9525 #if defined(TARGET_NR_readlinkat)
9526     case TARGET_NR_readlinkat:
9527         {
9528             void *p2;
9529             p  = lock_user_string(arg2);
9530             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9531             if (!p || !p2) {
9532                 ret = -TARGET_EFAULT;
9533             } else if (is_proc_myself((const char *)p, "exe")) {
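                /* Emulate readlinkat() of /proc/self/exe by returning the
                 * path of the binary that is actually being emulated.
                 */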
9534                 char real[PATH_MAX], *temp;
9535                 temp = realpath(exec_path, real);
9536                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9537                 snprintf((char *)p2, arg4, "%s", real);
9538             } else {
9539                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9540             }
9541             unlock_user(p2, arg3, ret);
9542             unlock_user(p, arg2, 0);
9543         }
9544         break;
9545 #endif
9546 #ifdef TARGET_NR_uselib
9547     case TARGET_NR_uselib:
9548         goto unimplemented;
9549 #endif
9550 #ifdef TARGET_NR_swapon
9551     case TARGET_NR_swapon:
9552         if (!(p = lock_user_string(arg1)))
9553             goto efault;
9554         ret = get_errno(swapon(p, arg2));
9555         unlock_user(p, arg1, 0);
9556         break;
9557 #endif
9558     case TARGET_NR_reboot:
9559         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9560            /* arg4 is only used for RESTART2 and must be ignored in all other cases */
9561            p = lock_user_string(arg4);
9562            if (!p) {
9563               goto efault;
9564            }
9565            ret = get_errno(reboot(arg1, arg2, arg3, p));
9566            unlock_user(p, arg4, 0);
9567         } else {
9568            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9569         }
9570         break;
9571 #ifdef TARGET_NR_readdir
9572     case TARGET_NR_readdir:
9573         goto unimplemented;
9574 #endif
9575 #ifdef TARGET_NR_mmap
9576     case TARGET_NR_mmap:
9577 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9578     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9579     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9580     || defined(TARGET_S390X)
9581         {
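            /* On these 32-bit targets the old mmap syscall passes a pointer
             * to a block of six longs in guest memory instead of passing its
             * arguments in registers, so unpack them first.
             */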
9582             abi_ulong *v;
9583             abi_ulong v1, v2, v3, v4, v5, v6;
9584             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9585                 goto efault;
9586             v1 = tswapal(v[0]);
9587             v2 = tswapal(v[1]);
9588             v3 = tswapal(v[2]);
9589             v4 = tswapal(v[3]);
9590             v5 = tswapal(v[4]);
9591             v6 = tswapal(v[5]);
9592             unlock_user(v, arg1, 0);
9593             ret = get_errno(target_mmap(v1, v2, v3,
9594                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9595                                         v5, v6));
9596         }
9597 #else
9598         ret = get_errno(target_mmap(arg1, arg2, arg3,
9599                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9600                                     arg5,
9601                                     arg6));
9602 #endif
9603         break;
9604 #endif
9605 #ifdef TARGET_NR_mmap2
9606     case TARGET_NR_mmap2:
9607 #ifndef MMAP_SHIFT
9608 #define MMAP_SHIFT 12
9609 #endif
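        /* mmap2 specifies the file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 by default), so scale it to a byte offset for target_mmap().
         */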
9610         ret = get_errno(target_mmap(arg1, arg2, arg3,
9611                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9612                                     arg5,
9613                                     arg6 << MMAP_SHIFT));
9614         break;
9615 #endif
9616     case TARGET_NR_munmap:
9617         ret = get_errno(target_munmap(arg1, arg2));
9618         break;
9619     case TARGET_NR_mprotect:
9620         {
9621             TaskState *ts = cpu->opaque;
9622             /* Special hack to detect libc making the stack executable.  */
9623             if ((arg3 & PROT_GROWSDOWN)
9624                 && arg1 >= ts->info->stack_limit
9625                 && arg1 <= ts->info->start_stack) {
9626                 arg3 &= ~PROT_GROWSDOWN;
9627                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9628                 arg1 = ts->info->stack_limit;
9629             }
9630         }
9631         ret = get_errno(target_mprotect(arg1, arg2, arg3));
9632         break;
9633 #ifdef TARGET_NR_mremap
9634     case TARGET_NR_mremap:
9635         ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9636         break;
9637 #endif
9638         /* ??? msync/mlock/munlock are broken for softmmu.  */
9639 #ifdef TARGET_NR_msync
9640     case TARGET_NR_msync:
9641         ret = get_errno(msync(g2h(arg1), arg2, arg3));
9642         break;
9643 #endif
9644 #ifdef TARGET_NR_mlock
9645     case TARGET_NR_mlock:
9646         ret = get_errno(mlock(g2h(arg1), arg2));
9647         break;
9648 #endif
9649 #ifdef TARGET_NR_munlock
9650     case TARGET_NR_munlock:
9651         ret = get_errno(munlock(g2h(arg1), arg2));
9652         break;
9653 #endif
9654 #ifdef TARGET_NR_mlockall
9655     case TARGET_NR_mlockall:
9656         ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9657         break;
9658 #endif
9659 #ifdef TARGET_NR_munlockall
9660     case TARGET_NR_munlockall:
9661         ret = get_errno(munlockall());
9662         break;
9663 #endif
9664 #ifdef TARGET_NR_truncate
9665     case TARGET_NR_truncate:
9666         if (!(p = lock_user_string(arg1)))
9667             goto efault;
9668         ret = get_errno(truncate(p, arg2));
9669         unlock_user(p, arg1, 0);
9670         break;
9671 #endif
9672 #ifdef TARGET_NR_ftruncate
9673     case TARGET_NR_ftruncate:
9674         ret = get_errno(ftruncate(arg1, arg2));
9675         break;
9676 #endif
9677     case TARGET_NR_fchmod:
9678         ret = get_errno(fchmod(arg1, arg2));
9679         break;
9680 #if defined(TARGET_NR_fchmodat)
9681     case TARGET_NR_fchmodat:
9682         if (!(p = lock_user_string(arg2)))
9683             goto efault;
9684         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9685         unlock_user(p, arg2, 0);
9686         break;
9687 #endif
9688     case TARGET_NR_getpriority:
9689         /* Note that negative values are valid for getpriority, so we must
9690            differentiate based on errno settings.  */
9691         errno = 0;
9692         ret = getpriority(arg1, arg2);
9693         if (ret == -1 && errno != 0) {
9694             ret = -host_to_target_errno(errno);
9695             break;
9696         }
9697 #ifdef TARGET_ALPHA
9698         /* Return value is the unbiased priority.  Signal no error.  */
9699         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9700 #else
9701         /* Return value is a biased priority to avoid negative numbers.  */
9702         ret = 20 - ret;
9703 #endif
9704         break;
9705     case TARGET_NR_setpriority:
9706         ret = get_errno(setpriority(arg1, arg2, arg3));
9707         break;
9708 #ifdef TARGET_NR_profil
9709     case TARGET_NR_profil:
9710         goto unimplemented;
9711 #endif
9712 #ifdef TARGET_NR_statfs
9713     case TARGET_NR_statfs:
9714         if (!(p = lock_user_string(arg1)))
9715             goto efault;
9716         ret = get_errno(statfs(path(p), &stfs));
9717         unlock_user(p, arg1, 0);
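    /* The host statfs result is converted below; fstatfs jumps to this
     * label as well.
     */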
9718     convert_statfs:
9719         if (!is_error(ret)) {
9720             struct target_statfs *target_stfs;
9721 
9722             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9723                 goto efault;
9724             __put_user(stfs.f_type, &target_stfs->f_type);
9725             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9726             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9727             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9728             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9729             __put_user(stfs.f_files, &target_stfs->f_files);
9730             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9731             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9732             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9733             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9734             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9735 #ifdef _STATFS_F_FLAGS
9736             __put_user(stfs.f_flags, &target_stfs->f_flags);
9737 #else
9738             __put_user(0, &target_stfs->f_flags);
9739 #endif
9740             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9741             unlock_user_struct(target_stfs, arg2, 1);
9742         }
9743         break;
9744 #endif
9745 #ifdef TARGET_NR_fstatfs
9746     case TARGET_NR_fstatfs:
9747         ret = get_errno(fstatfs(arg1, &stfs));
9748         goto convert_statfs;
9749 #endif
9750 #ifdef TARGET_NR_statfs64
9751     case TARGET_NR_statfs64:
9752         if (!(p = lock_user_string(arg1)))
9753             goto efault;
9754         ret = get_errno(statfs(path(p), &stfs));
9755         unlock_user(p, arg1, 0);
9756     convert_statfs64:
9757         if (!is_error(ret)) {
9758             struct target_statfs64 *target_stfs;
9759 
9760             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9761                 goto efault;
9762             __put_user(stfs.f_type, &target_stfs->f_type);
9763             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9764             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9765             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9766             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9767             __put_user(stfs.f_files, &target_stfs->f_files);
9768             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9769             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9770             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9771             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9772             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9773             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9774             unlock_user_struct(target_stfs, arg3, 1);
9775         }
9776         break;
9777     case TARGET_NR_fstatfs64:
9778         ret = get_errno(fstatfs(arg1, &stfs));
9779         goto convert_statfs64;
9780 #endif
9781 #ifdef TARGET_NR_ioperm
9782     case TARGET_NR_ioperm:
9783         goto unimplemented;
9784 #endif
9785 #ifdef TARGET_NR_socketcall
9786     case TARGET_NR_socketcall:
9787         ret = do_socketcall(arg1, arg2);
9788         break;
9789 #endif
9790 #ifdef TARGET_NR_accept
9791     case TARGET_NR_accept:
9792         ret = do_accept4(arg1, arg2, arg3, 0);
9793         break;
9794 #endif
9795 #ifdef TARGET_NR_accept4
9796     case TARGET_NR_accept4:
9797         ret = do_accept4(arg1, arg2, arg3, arg4);
9798         break;
9799 #endif
9800 #ifdef TARGET_NR_bind
9801     case TARGET_NR_bind:
9802         ret = do_bind(arg1, arg2, arg3);
9803         break;
9804 #endif
9805 #ifdef TARGET_NR_connect
9806     case TARGET_NR_connect:
9807         ret = do_connect(arg1, arg2, arg3);
9808         break;
9809 #endif
9810 #ifdef TARGET_NR_getpeername
9811     case TARGET_NR_getpeername:
9812         ret = do_getpeername(arg1, arg2, arg3);
9813         break;
9814 #endif
9815 #ifdef TARGET_NR_getsockname
9816     case TARGET_NR_getsockname:
9817         ret = do_getsockname(arg1, arg2, arg3);
9818         break;
9819 #endif
9820 #ifdef TARGET_NR_getsockopt
9821     case TARGET_NR_getsockopt:
9822         ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9823         break;
9824 #endif
9825 #ifdef TARGET_NR_listen
9826     case TARGET_NR_listen:
9827         ret = get_errno(listen(arg1, arg2));
9828         break;
9829 #endif
9830 #ifdef TARGET_NR_recv
9831     case TARGET_NR_recv:
9832         ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9833         break;
9834 #endif
9835 #ifdef TARGET_NR_recvfrom
9836     case TARGET_NR_recvfrom:
9837         ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9838         break;
9839 #endif
9840 #ifdef TARGET_NR_recvmsg
9841     case TARGET_NR_recvmsg:
9842         ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9843         break;
9844 #endif
9845 #ifdef TARGET_NR_send
9846     case TARGET_NR_send:
9847         ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9848         break;
9849 #endif
9850 #ifdef TARGET_NR_sendmsg
9851     case TARGET_NR_sendmsg:
9852         ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9853         break;
9854 #endif
9855 #ifdef TARGET_NR_sendmmsg
9856     case TARGET_NR_sendmmsg:
9857         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9858         break;
9859     case TARGET_NR_recvmmsg:
9860         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9861         break;
9862 #endif
9863 #ifdef TARGET_NR_sendto
9864     case TARGET_NR_sendto:
9865         ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9866         break;
9867 #endif
9868 #ifdef TARGET_NR_shutdown
9869     case TARGET_NR_shutdown:
9870         ret = get_errno(shutdown(arg1, arg2));
9871         break;
9872 #endif
9873 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9874     case TARGET_NR_getrandom:
9875         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9876         if (!p) {
9877             goto efault;
9878         }
9879         ret = get_errno(getrandom(p, arg2, arg3));
9880         unlock_user(p, arg1, ret);
9881         break;
9882 #endif
9883 #ifdef TARGET_NR_socket
9884     case TARGET_NR_socket:
9885         ret = do_socket(arg1, arg2, arg3);
9886         break;
9887 #endif
9888 #ifdef TARGET_NR_socketpair
9889     case TARGET_NR_socketpair:
9890         ret = do_socketpair(arg1, arg2, arg3, arg4);
9891         break;
9892 #endif
9893 #ifdef TARGET_NR_setsockopt
9894     case TARGET_NR_setsockopt:
9895         ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9896         break;
9897 #endif
9898 #if defined(TARGET_NR_syslog)
9899     case TARGET_NR_syslog:
9900         {
9901             int len = arg2;
9902 
9903             switch (arg1) {
9904             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9905             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9906             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9907             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9908             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9909             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9910             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9911             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9912                 {
9913                     ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9914                 }
9915                 break;
9916             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9917             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9918             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9919                 {
9920                     ret = -TARGET_EINVAL;
9921                     if (len < 0) {
9922                         goto fail;
9923                     }
9924                     ret = 0;
9925                     if (len == 0) {
9926                         break;
9927                     }
9928                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9929                     if (!p) {
9930                         ret = -TARGET_EFAULT;
9931                         goto fail;
9932                     }
9933                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9934                     unlock_user(p, arg2, arg3);
9935                 }
9936                 break;
9937             default:
9938                 ret = -TARGET_EINVAL;
9939                 break;
9940             }
9941         }
9942         break;
9943 #endif
9944     case TARGET_NR_setitimer:
9945         {
9946             struct itimerval value, ovalue, *pvalue;
9947 
9948             if (arg2) {
9949                 pvalue = &value;
9950                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9951                     || copy_from_user_timeval(&pvalue->it_value,
9952                                               arg2 + sizeof(struct target_timeval)))
9953                     goto efault;
9954             } else {
9955                 pvalue = NULL;
9956             }
9957             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9958             if (!is_error(ret) && arg3) {
9959                 if (copy_to_user_timeval(arg3,
9960                                          &ovalue.it_interval)
9961                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9962                                             &ovalue.it_value))
9963                     goto efault;
9964             }
9965         }
9966         break;
9967     case TARGET_NR_getitimer:
9968         {
9969             struct itimerval value;
9970 
9971             ret = get_errno(getitimer(arg1, &value));
9972             if (!is_error(ret) && arg2) {
9973                 if (copy_to_user_timeval(arg2,
9974                                          &value.it_interval)
9975                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9976                                             &value.it_value))
9977                     goto efault;
9978             }
9979         }
9980         break;
9981 #ifdef TARGET_NR_stat
9982     case TARGET_NR_stat:
9983         if (!(p = lock_user_string(arg1)))
9984             goto efault;
9985         ret = get_errno(stat(path(p), &st));
9986         unlock_user(p, arg1, 0);
9987         goto do_stat;
9988 #endif
9989 #ifdef TARGET_NR_lstat
9990     case TARGET_NR_lstat:
9991         if (!(p = lock_user_string(arg1)))
9992             goto efault;
9993         ret = get_errno(lstat(path(p), &st));
9994         unlock_user(p, arg1, 0);
9995         goto do_stat;
9996 #endif
9997 #ifdef TARGET_NR_fstat
9998     case TARGET_NR_fstat:
9999         {
10000             ret = get_errno(fstat(arg1, &st));
10001 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10002         do_stat:
10003 #endif
10004             if (!is_error(ret)) {
10005                 struct target_stat *target_st;
10006 
10007                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10008                     goto efault;
10009                 memset(target_st, 0, sizeof(*target_st));
10010                 __put_user(st.st_dev, &target_st->st_dev);
10011                 __put_user(st.st_ino, &target_st->st_ino);
10012                 __put_user(st.st_mode, &target_st->st_mode);
10013                 __put_user(st.st_uid, &target_st->st_uid);
10014                 __put_user(st.st_gid, &target_st->st_gid);
10015                 __put_user(st.st_nlink, &target_st->st_nlink);
10016                 __put_user(st.st_rdev, &target_st->st_rdev);
10017                 __put_user(st.st_size, &target_st->st_size);
10018                 __put_user(st.st_blksize, &target_st->st_blksize);
10019                 __put_user(st.st_blocks, &target_st->st_blocks);
10020                 __put_user(st.st_atime, &target_st->target_st_atime);
10021                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10022                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10023                 unlock_user_struct(target_st, arg2, 1);
10024             }
10025         }
10026         break;
10027 #endif
10028 #ifdef TARGET_NR_olduname
10029     case TARGET_NR_olduname:
10030         goto unimplemented;
10031 #endif
10032 #ifdef TARGET_NR_iopl
10033     case TARGET_NR_iopl:
10034         goto unimplemented;
10035 #endif
10036     case TARGET_NR_vhangup:
10037         ret = get_errno(vhangup());
10038         break;
10039 #ifdef TARGET_NR_idle
10040     case TARGET_NR_idle:
10041         goto unimplemented;
10042 #endif
10043 #ifdef TARGET_NR_syscall
10044     case TARGET_NR_syscall:
10045         ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10046                          arg6, arg7, arg8, 0);
10047         break;
10048 #endif
10049     case TARGET_NR_wait4:
10050         {
10051             int status;
10052             abi_long status_ptr = arg2;
10053             struct rusage rusage, *rusage_ptr;
10054             abi_ulong target_rusage = arg4;
10055             abi_long rusage_err;
10056             if (target_rusage)
10057                 rusage_ptr = &rusage;
10058             else
10059                 rusage_ptr = NULL;
10060             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10061             if (!is_error(ret)) {
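                /* Only copy the status back when a child was actually
                 * reported; ret == 0 (e.g. WNOHANG with nothing pending)
                 * leaves the status untouched.
                 */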
10062                 if (status_ptr && ret) {
10063                     status = host_to_target_waitstatus(status);
10064                     if (put_user_s32(status, status_ptr))
10065                         goto efault;
10066                 }
10067                 if (target_rusage) {
10068                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10069                     if (rusage_err) {
10070                         ret = rusage_err;
10071                     }
10072                 }
10073             }
10074         }
10075         break;
10076 #ifdef TARGET_NR_swapoff
10077     case TARGET_NR_swapoff:
10078         if (!(p = lock_user_string(arg1)))
10079             goto efault;
10080         ret = get_errno(swapoff(p));
10081         unlock_user(p, arg1, 0);
10082         break;
10083 #endif
10084     case TARGET_NR_sysinfo:
10085         {
10086             struct target_sysinfo *target_value;
10087             struct sysinfo value;
10088             ret = get_errno(sysinfo(&value));
10089             if (!is_error(ret) && arg1)
10090             {
10091                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10092                     goto efault;
10093                 __put_user(value.uptime, &target_value->uptime);
10094                 __put_user(value.loads[0], &target_value->loads[0]);
10095                 __put_user(value.loads[1], &target_value->loads[1]);
10096                 __put_user(value.loads[2], &target_value->loads[2]);
10097                 __put_user(value.totalram, &target_value->totalram);
10098                 __put_user(value.freeram, &target_value->freeram);
10099                 __put_user(value.sharedram, &target_value->sharedram);
10100                 __put_user(value.bufferram, &target_value->bufferram);
10101                 __put_user(value.totalswap, &target_value->totalswap);
10102                 __put_user(value.freeswap, &target_value->freeswap);
10103                 __put_user(value.procs, &target_value->procs);
10104                 __put_user(value.totalhigh, &target_value->totalhigh);
10105                 __put_user(value.freehigh, &target_value->freehigh);
10106                 __put_user(value.mem_unit, &target_value->mem_unit);
10107                 unlock_user_struct(target_value, arg1, 1);
10108             }
10109         }
10110         break;
10111 #ifdef TARGET_NR_ipc
10112     case TARGET_NR_ipc:
10113         ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10114         break;
10115 #endif
10116 #ifdef TARGET_NR_semget
10117     case TARGET_NR_semget:
10118         ret = get_errno(semget(arg1, arg2, arg3));
10119         break;
10120 #endif
10121 #ifdef TARGET_NR_semop
10122     case TARGET_NR_semop:
10123         ret = do_semop(arg1, arg2, arg3);
10124         break;
10125 #endif
10126 #ifdef TARGET_NR_semctl
10127     case TARGET_NR_semctl:
10128         ret = do_semctl(arg1, arg2, arg3, arg4);
10129         break;
10130 #endif
10131 #ifdef TARGET_NR_msgctl
10132     case TARGET_NR_msgctl:
10133         ret = do_msgctl(arg1, arg2, arg3);
10134         break;
10135 #endif
10136 #ifdef TARGET_NR_msgget
10137     case TARGET_NR_msgget:
10138         ret = get_errno(msgget(arg1, arg2));
10139         break;
10140 #endif
10141 #ifdef TARGET_NR_msgrcv
10142     case TARGET_NR_msgrcv:
10143         ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10144         break;
10145 #endif
10146 #ifdef TARGET_NR_msgsnd
10147     case TARGET_NR_msgsnd:
10148         ret = do_msgsnd(arg1, arg2, arg3, arg4);
10149         break;
10150 #endif
10151 #ifdef TARGET_NR_shmget
10152     case TARGET_NR_shmget:
10153         ret = get_errno(shmget(arg1, arg2, arg3));
10154         break;
10155 #endif
10156 #ifdef TARGET_NR_shmctl
10157     case TARGET_NR_shmctl:
10158         ret = do_shmctl(arg1, arg2, arg3);
10159         break;
10160 #endif
10161 #ifdef TARGET_NR_shmat
10162     case TARGET_NR_shmat:
10163         ret = do_shmat(cpu_env, arg1, arg2, arg3);
10164         break;
10165 #endif
10166 #ifdef TARGET_NR_shmdt
10167     case TARGET_NR_shmdt:
10168         ret = do_shmdt(arg1);
10169         break;
10170 #endif
10171     case TARGET_NR_fsync:
10172         ret = get_errno(fsync(arg1));
10173         break;
10174     case TARGET_NR_clone:
10175         /* Linux manages to have three different orderings for its
10176          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10177          * match the kernel's CONFIG_CLONE_* settings.
10178          * Microblaze is further special in that it uses a sixth
10179          * implicit argument to clone for the TLS pointer.
10180          */
10181 #if defined(TARGET_MICROBLAZE)
10182         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10183 #elif defined(TARGET_CLONE_BACKWARDS)
10184         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10185 #elif defined(TARGET_CLONE_BACKWARDS2)
10186         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10187 #else
10188         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10189 #endif
10190         break;
10191 #ifdef __NR_exit_group
10192         /* new thread calls */
10193     case TARGET_NR_exit_group:
10194         preexit_cleanup(cpu_env, arg1);
10195         ret = get_errno(exit_group(arg1));
10196         break;
10197 #endif
10198     case TARGET_NR_setdomainname:
10199         if (!(p = lock_user_string(arg1)))
10200             goto efault;
10201         ret = get_errno(setdomainname(p, arg2));
10202         unlock_user(p, arg1, 0);
10203         break;
10204     case TARGET_NR_uname:
10205         /* no need to transcode because we use the linux syscall */
10206         {
10207             struct new_utsname * buf;
10208 
10209             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10210                 goto efault;
10211             ret = get_errno(sys_uname(buf));
10212             if (!is_error(ret)) {
10213                 /* Overwrite the native machine name with whatever is being
10214                    emulated. */
10215                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10216                           sizeof(buf->machine));
10217                 /* Allow the user to override the reported release.  */
10218                 if (qemu_uname_release && *qemu_uname_release) {
10219                     g_strlcpy(buf->release, qemu_uname_release,
10220                               sizeof(buf->release));
10221                 }
10222             }
10223             unlock_user_struct(buf, arg1, 1);
10224         }
10225         break;
10226 #ifdef TARGET_I386
10227     case TARGET_NR_modify_ldt:
10228         ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10229         break;
10230 #if !defined(TARGET_X86_64)
10231     case TARGET_NR_vm86old:
10232         goto unimplemented;
10233     case TARGET_NR_vm86:
10234         ret = do_vm86(cpu_env, arg1, arg2);
10235         break;
10236 #endif
10237 #endif
10238     case TARGET_NR_adjtimex:
10239         {
10240             struct timex host_buf;
10241 
10242             if (target_to_host_timex(&host_buf, arg1) != 0) {
10243                 goto efault;
10244             }
10245             ret = get_errno(adjtimex(&host_buf));
10246             if (!is_error(ret)) {
10247                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10248                     goto efault;
10249                 }
10250             }
10251         }
10252         break;
10253 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10254     case TARGET_NR_clock_adjtime:
10255         {
10256             struct timex htx, *phtx = &htx;
10257 
10258             if (target_to_host_timex(phtx, arg2) != 0) {
10259                 goto efault;
10260             }
10261             ret = get_errno(clock_adjtime(arg1, phtx));
10262             if (!is_error(ret) && phtx) {
10263                 if (host_to_target_timex(arg2, phtx) != 0) {
10264                     goto efault;
10265                 }
10266             }
10267         }
10268         break;
10269 #endif
10270 #ifdef TARGET_NR_create_module
10271     case TARGET_NR_create_module:
10272 #endif
10273     case TARGET_NR_init_module:
10274     case TARGET_NR_delete_module:
10275 #ifdef TARGET_NR_get_kernel_syms
10276     case TARGET_NR_get_kernel_syms:
10277 #endif
10278         goto unimplemented;
10279     case TARGET_NR_quotactl:
10280         goto unimplemented;
10281     case TARGET_NR_getpgid:
10282         ret = get_errno(getpgid(arg1));
10283         break;
10284     case TARGET_NR_fchdir:
10285         ret = get_errno(fchdir(arg1));
10286         break;
10287 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10288     case TARGET_NR_bdflush:
10289         goto unimplemented;
10290 #endif
10291 #ifdef TARGET_NR_sysfs
10292     case TARGET_NR_sysfs:
10293         goto unimplemented;
10294 #endif
10295     case TARGET_NR_personality:
10296         ret = get_errno(personality(arg1));
10297         break;
10298 #ifdef TARGET_NR_afs_syscall
10299     case TARGET_NR_afs_syscall:
10300         goto unimplemented;
10301 #endif
10302 #ifdef TARGET_NR__llseek /* Not on alpha */
10303     case TARGET_NR__llseek:
10304         {
10305             int64_t res;
10306 #if !defined(__NR_llseek)
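            /* Hosts without an llseek syscall have a 64-bit off_t, so the
             * offset can be assembled from the two 32-bit halves and passed
             * to plain lseek().
             */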
10307             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10308             if (res == -1) {
10309                 ret = get_errno(res);
10310             } else {
10311                 ret = 0;
10312             }
10313 #else
10314             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10315 #endif
10316             if ((ret == 0) && put_user_s64(res, arg4)) {
10317                 goto efault;
10318             }
10319         }
10320         break;
10321 #endif
10322 #ifdef TARGET_NR_getdents
10323     case TARGET_NR_getdents:
10324 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10325 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10326         {
10327             struct target_dirent *target_dirp;
10328             struct linux_dirent *dirp;
10329             abi_long count = arg3;
10330 
10331             dirp = g_try_malloc(count);
10332             if (!dirp) {
10333                 ret = -TARGET_ENOMEM;
10334                 goto fail;
10335             }
10336 
10337             ret = get_errno(sys_getdents(arg1, dirp, count));
10338             if (!is_error(ret)) {
10339                 struct linux_dirent *de;
10340 		struct target_dirent *tde;
10341                 int len = ret;
10342                 int reclen, treclen;
10343 		int count1, tnamelen;
10344 
10345 		count1 = 0;
10346                 de = dirp;
10347                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10348                     goto efault;
10349 		tde = target_dirp;
10350                 while (len > 0) {
10351                     reclen = de->d_reclen;
10352                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10353                     assert(tnamelen >= 0);
10354                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10355                     assert(count1 + treclen <= count);
10356                     tde->d_reclen = tswap16(treclen);
10357                     tde->d_ino = tswapal(de->d_ino);
10358                     tde->d_off = tswapal(de->d_off);
10359                     memcpy(tde->d_name, de->d_name, tnamelen);
10360                     de = (struct linux_dirent *)((char *)de + reclen);
10361                     len -= reclen;
10362                     tde = (struct target_dirent *)((char *)tde + treclen);
10363 		    count1 += treclen;
10364                 }
10365 		ret = count1;
10366                 unlock_user(target_dirp, arg2, ret);
10367             }
10368             g_free(dirp);
10369         }
10370 #else
10371         {
10372             struct linux_dirent *dirp;
10373             abi_long count = arg3;
10374 
10375             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10376                 goto efault;
10377             ret = get_errno(sys_getdents(arg1, dirp, count));
10378             if (!is_error(ret)) {
10379                 struct linux_dirent *de;
10380                 int len = ret;
10381                 int reclen;
10382                 de = dirp;
10383                 while (len > 0) {
10384                     reclen = de->d_reclen;
10385                     if (reclen > len)
10386                         break;
10387                     de->d_reclen = tswap16(reclen);
10388                     tswapls(&de->d_ino);
10389                     tswapls(&de->d_off);
10390                     de = (struct linux_dirent *)((char *)de + reclen);
10391                     len -= reclen;
10392                 }
10393             }
10394             unlock_user(dirp, arg2, ret);
10395         }
10396 #endif
10397 #else
10398         /* Implement getdents in terms of getdents64 */
10399         {
10400             struct linux_dirent64 *dirp;
10401             abi_long count = arg3;
10402 
10403             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10404             if (!dirp) {
10405                 goto efault;
10406             }
10407             ret = get_errno(sys_getdents64(arg1, dirp, count));
10408             if (!is_error(ret)) {
10409                 /* Convert the dirent64 structs to target dirent.  We do this
10410                  * in-place, since we can guarantee that a target_dirent is no
10411                  * larger than a dirent64; however this means we have to be
10412                  * careful to read everything before writing in the new format.
10413                  */
10414                 struct linux_dirent64 *de;
10415                 struct target_dirent *tde;
10416                 int len = ret;
10417                 int tlen = 0;
10418 
10419                 de = dirp;
10420                 tde = (struct target_dirent *)dirp;
10421                 while (len > 0) {
10422                     int namelen, treclen;
10423                     int reclen = de->d_reclen;
10424                     uint64_t ino = de->d_ino;
10425                     int64_t off = de->d_off;
10426                     uint8_t type = de->d_type;
10427 
10428                     namelen = strlen(de->d_name);
10429                     treclen = offsetof(struct target_dirent, d_name)
10430                         + namelen + 2;
10431                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10432 
10433                     memmove(tde->d_name, de->d_name, namelen + 1);
10434                     tde->d_ino = tswapal(ino);
10435                     tde->d_off = tswapal(off);
10436                     tde->d_reclen = tswap16(treclen);
10437                     /* The d_type byte is stored in what was formerly a
10438                      * padding byte at the end of the target structure:
10439                      */
10440                     *(((char *)tde) + treclen - 1) = type;
10441 
10442                     de = (struct linux_dirent64 *)((char *)de + reclen);
10443                     tde = (struct target_dirent *)((char *)tde + treclen);
10444                     len -= reclen;
10445                     tlen += treclen;
10446                 }
10447                 ret = tlen;
10448             }
10449             unlock_user(dirp, arg2, ret);
10450         }
10451 #endif
10452         break;
10453 #endif /* TARGET_NR_getdents */
10454 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10455     case TARGET_NR_getdents64:
10456         {
10457             struct linux_dirent64 *dirp;
10458             abi_long count = arg3;
10459             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10460                 goto efault;
10461             ret = get_errno(sys_getdents64(arg1, dirp, count));
10462             if (!is_error(ret)) {
10463                 struct linux_dirent64 *de;
10464                 int len = ret;
10465                 int reclen;
10466                 de = dirp;
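                /* The dirent64 layout matches the target's, so only the
                 * multi-byte fields need byte swapping in place.
                 */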
10467                 while (len > 0) {
10468                     reclen = de->d_reclen;
10469                     if (reclen > len)
10470                         break;
10471                     de->d_reclen = tswap16(reclen);
10472                     tswap64s((uint64_t *)&de->d_ino);
10473                     tswap64s((uint64_t *)&de->d_off);
10474                     de = (struct linux_dirent64 *)((char *)de + reclen);
10475                     len -= reclen;
10476                 }
10477             }
10478             unlock_user(dirp, arg2, ret);
10479         }
10480         break;
10481 #endif /* TARGET_NR_getdents64 */
10482 #if defined(TARGET_NR__newselect)
10483     case TARGET_NR__newselect:
10484         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10485         break;
10486 #endif
10487 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10488 # ifdef TARGET_NR_poll
10489     case TARGET_NR_poll:
10490 # endif
10491 # ifdef TARGET_NR_ppoll
10492     case TARGET_NR_ppoll:
10493 # endif
10494         {
10495             struct target_pollfd *target_pfd;
10496             unsigned int nfds = arg2;
10497             struct pollfd *pfd;
10498             unsigned int i;
10499 
10500             pfd = NULL;
10501             target_pfd = NULL;
10502             if (nfds) {
10503                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10504                     ret = -TARGET_EINVAL;
10505                     break;
10506                 }
10507 
10508                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10509                                        sizeof(struct target_pollfd) * nfds, 1);
10510                 if (!target_pfd) {
10511                     goto efault;
10512                 }
10513 
10514                 pfd = alloca(sizeof(struct pollfd) * nfds);
10515                 for (i = 0; i < nfds; i++) {
10516                     pfd[i].fd = tswap32(target_pfd[i].fd);
10517                     pfd[i].events = tswap16(target_pfd[i].events);
10518                 }
10519             }
10520 
10521             switch (num) {
10522 # ifdef TARGET_NR_ppoll
10523             case TARGET_NR_ppoll:
10524             {
10525                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10526                 target_sigset_t *target_set;
10527                 sigset_t _set, *set = &_set;
10528 
10529                 if (arg3) {
10530                     if (target_to_host_timespec(timeout_ts, arg3)) {
10531                         unlock_user(target_pfd, arg1, 0);
10532                         goto efault;
10533                     }
10534                 } else {
10535                     timeout_ts = NULL;
10536                 }
10537 
10538                 if (arg4) {
10539                     if (arg5 != sizeof(target_sigset_t)) {
10540                         unlock_user(target_pfd, arg1, 0);
10541                         ret = -TARGET_EINVAL;
10542                         break;
10543                     }
10544 
10545                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10546                     if (!target_set) {
10547                         unlock_user(target_pfd, arg1, 0);
10548                         goto efault;
10549                     }
10550                     target_to_host_sigset(set, target_set);
10551                 } else {
10552                     set = NULL;
10553                 }
10554 
10555                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10556                                            set, SIGSET_T_SIZE));
10557 
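                /* Linux ppoll() updates the timespec with the remaining
                 * time, so write it back to the guest.
                 */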
10558                 if (!is_error(ret) && arg3) {
10559                     host_to_target_timespec(arg3, timeout_ts);
10560                 }
10561                 if (arg4) {
10562                     unlock_user(target_set, arg4, 0);
10563                 }
10564                 break;
10565             }
10566 # endif
10567 # ifdef TARGET_NR_poll
10568             case TARGET_NR_poll:
10569             {
10570                 struct timespec ts, *pts;
10571 
10572                 if (arg3 >= 0) {
10573                     /* Convert ms to secs, ns */
10574                     ts.tv_sec = arg3 / 1000;
10575                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10576                     pts = &ts;
10577                 } else {
10578                     /* A negative poll() timeout means "infinite" */
10579                     pts = NULL;
10580                 }
10581                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10582                 break;
10583             }
10584 # endif
10585             default:
10586                 g_assert_not_reached();
10587             }
10588 
10589             if (!is_error(ret)) {
10590                 for(i = 0; i < nfds; i++) {
10591                     target_pfd[i].revents = tswap16(pfd[i].revents);
10592                 }
10593             }
10594             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10595         }
10596         break;
10597 #endif
10598     case TARGET_NR_flock:
10599         /* NOTE: the flock constants seem to be the same for every
10600            Linux platform */
10601         ret = get_errno(safe_flock(arg1, arg2));
10602         break;
10603     case TARGET_NR_readv:
10604         {
10605             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10606             if (vec != NULL) {
10607                 ret = get_errno(safe_readv(arg1, vec, arg3));
10608                 unlock_iovec(vec, arg2, arg3, 1);
10609             } else {
10610                 ret = -host_to_target_errno(errno);
10611             }
10612         }
10613         break;
10614     case TARGET_NR_writev:
10615         {
10616             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10617             if (vec != NULL) {
10618                 ret = get_errno(safe_writev(arg1, vec, arg3));
10619                 unlock_iovec(vec, arg2, arg3, 0);
10620             } else {
10621                 ret = -host_to_target_errno(errno);
10622             }
10623         }
10624         break;
10625 #if defined(TARGET_NR_preadv)
10626     case TARGET_NR_preadv:
10627         {
10628             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10629             if (vec != NULL) {
10630                 unsigned long low, high;
10631 
10632                 target_to_host_low_high(arg4, arg5, &low, &high);
10633                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10634                 unlock_iovec(vec, arg2, arg3, 1);
10635             } else {
10636                 ret = -host_to_target_errno(errno);
10637            }
10638         }
10639         break;
10640 #endif
10641 #if defined(TARGET_NR_pwritev)
10642     case TARGET_NR_pwritev:
10643         {
10644             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10645             if (vec != NULL) {
10646                 unsigned long low, high;
10647 
10648                 target_to_host_low_high(arg4, arg5, &low, &high);
10649                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10650                 unlock_iovec(vec, arg2, arg3, 0);
10651             } else {
10652                 ret = -host_to_target_errno(errno);
10653            }
10654         }
10655         break;
10656 #endif
10657     case TARGET_NR_getsid:
10658         ret = get_errno(getsid(arg1));
10659         break;
10660 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10661     case TARGET_NR_fdatasync:
10662         ret = get_errno(fdatasync(arg1));
10663         break;
10664 #endif
10665 #ifdef TARGET_NR__sysctl
10666     case TARGET_NR__sysctl:
10667         /* We don't implement this, but ENOTDIR is always a safe
10668            return value. */
10669         ret = -TARGET_ENOTDIR;
10670         break;
10671 #endif
10672     case TARGET_NR_sched_getaffinity:
10673         {
10674             unsigned int mask_size;
10675             unsigned long *mask;
10676 
10677             /*
10678              * sched_getaffinity needs multiples of ulong, so need to take
10679              * care of mismatches between target ulong and host ulong sizes.
10680              */
10681             if (arg2 & (sizeof(abi_ulong) - 1)) {
10682                 ret = -TARGET_EINVAL;
10683                 break;
10684             }
10685             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10686 
10687             mask = alloca(mask_size);
10688             memset(mask, 0, mask_size);
10689             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10690 
10691             if (!is_error(ret)) {
10692                 if (ret > arg2) {
10693                     /* More data returned than the caller's buffer will fit.
10694                      * This only happens if sizeof(abi_long) < sizeof(long)
10695                      * and the caller passed us a buffer holding an odd number
10696                      * of abi_longs. If the host kernel is actually using the
10697                      * extra 4 bytes then fail EINVAL; otherwise we can just
10698                      * ignore them and only copy the interesting part.
10699                      */
10700                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10701                     if (numcpus > arg2 * 8) {
10702                         ret = -TARGET_EINVAL;
10703                         break;
10704                     }
10705                     ret = arg2;
10706                 }
10707 
10708                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10709                     goto efault;
10710                 }
10711             }
10712         }
10713         break;
10714     case TARGET_NR_sched_setaffinity:
10715         {
10716             unsigned int mask_size;
10717             unsigned long *mask;
10718 
10719             /*
10720              * sched_setaffinity needs multiples of ulong, so need to take
10721              * care of mismatches between target ulong and host ulong sizes.
10722              */
10723             if (arg2 & (sizeof(abi_ulong) - 1)) {
10724                 ret = -TARGET_EINVAL;
10725                 break;
10726             }
10727             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10728             mask = alloca(mask_size);
10729 
10730             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10731             if (ret) {
10732                 break;
10733             }
10734 
10735             ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10736         }
10737         break;
10738     case TARGET_NR_getcpu:
10739         {
10740             unsigned cpu, node;
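            /* The third getcpu() argument (tcache) is unused by the kernel,
             * so NULL is always passed for it.
             */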
10741             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10742                                        arg2 ? &node : NULL,
10743                                        NULL));
10744             if (is_error(ret)) {
10745                 goto fail;
10746             }
10747             if (arg1 && put_user_u32(cpu, arg1)) {
10748                 goto efault;
10749             }
10750             if (arg2 && put_user_u32(node, arg2)) {
10751                 goto efault;
10752             }
10753         }
10754         break;
10755     case TARGET_NR_sched_setparam:
10756         {
10757             struct sched_param *target_schp;
10758             struct sched_param schp;
10759 
10760             if (arg2 == 0) {
10761                 return -TARGET_EINVAL;
10762             }
10763             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10764                 goto efault;
10765             schp.sched_priority = tswap32(target_schp->sched_priority);
10766             unlock_user_struct(target_schp, arg2, 0);
10767             ret = get_errno(sched_setparam(arg1, &schp));
10768         }
10769         break;
10770     case TARGET_NR_sched_getparam:
10771         {
10772             struct sched_param *target_schp;
10773             struct sched_param schp;
10774 
10775             if (arg2 == 0) {
10776                 return -TARGET_EINVAL;
10777             }
10778             ret = get_errno(sched_getparam(arg1, &schp));
10779             if (!is_error(ret)) {
10780                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10781                     goto efault;
10782                 target_schp->sched_priority = tswap32(schp.sched_priority);
10783                 unlock_user_struct(target_schp, arg2, 1);
10784             }
10785         }
10786         break;
10787     case TARGET_NR_sched_setscheduler:
10788         {
10789             struct sched_param *target_schp;
10790             struct sched_param schp;
10791             if (arg3 == 0) {
10792                 return -TARGET_EINVAL;
10793             }
10794             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10795                 goto efault;
10796             schp.sched_priority = tswap32(target_schp->sched_priority);
10797             unlock_user_struct(target_schp, arg3, 0);
10798             ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10799         }
10800         break;
10801     case TARGET_NR_sched_getscheduler:
10802         ret = get_errno(sched_getscheduler(arg1));
10803         break;
10804     case TARGET_NR_sched_yield:
10805         ret = get_errno(sched_yield());
10806         break;
10807     case TARGET_NR_sched_get_priority_max:
10808         ret = get_errno(sched_get_priority_max(arg1));
10809         break;
10810     case TARGET_NR_sched_get_priority_min:
10811         ret = get_errno(sched_get_priority_min(arg1));
10812         break;
10813     case TARGET_NR_sched_rr_get_interval:
10814         {
10815             struct timespec ts;
10816             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10817             if (!is_error(ret)) {
10818                 ret = host_to_target_timespec(arg2, &ts);
10819             }
10820         }
10821         break;
10822     case TARGET_NR_nanosleep:
10823         {
10824             struct timespec req, rem;
10825             target_to_host_timespec(&req, arg1);
10826             ret = get_errno(safe_nanosleep(&req, &rem));
10827             if (is_error(ret) && arg2) {
10828                 host_to_target_timespec(arg2, &rem);
10829             }
10830         }
10831         break;
10832 #ifdef TARGET_NR_query_module
10833     case TARGET_NR_query_module:
10834         goto unimplemented;
10835 #endif
10836 #ifdef TARGET_NR_nfsservctl
10837     case TARGET_NR_nfsservctl:
10838         goto unimplemented;
10839 #endif
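          /* prctl: most options carry no pointer arguments and are passed
           * straight through; the cases below either take pointers or need
           * target-specific handling.  */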
10840     case TARGET_NR_prctl:
10841         switch (arg1) {
10842         case PR_GET_PDEATHSIG:
10843         {
10844             int deathsig;
10845             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10846             if (!is_error(ret) && arg2
10847                 && put_user_ual(deathsig, arg2)) {
10848                 goto efault;
10849             }
10850             break;
10851         }
10852 #ifdef PR_GET_NAME
10853         case PR_GET_NAME:
10854         {
10855             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10856             if (!name) {
10857                 goto efault;
10858             }
10859             ret = get_errno(prctl(arg1, (unsigned long)name,
10860                                   arg3, arg4, arg5));
10861             unlock_user(name, arg2, 16);
10862             break;
10863         }
10864         case PR_SET_NAME:
10865         {
10866             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10867             if (!name) {
10868                 goto efault;
10869             }
10870             ret = get_errno(prctl(arg1, (unsigned long)name,
10871                                   arg3, arg4, arg5));
10872             unlock_user(name, arg2, 0);
10873             break;
10874         }
10875 #endif
10876 #ifdef TARGET_AARCH64
10877         case TARGET_PR_SVE_SET_VL:
10878             /*
10879              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10880              * PR_SVE_VL_INHERIT.  Note the kernel definition
10881              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10882              * even though the current architectural maximum is VQ=16.
10883              */
10884             ret = -TARGET_EINVAL;
10885             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10886                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10887                 CPUARMState *env = cpu_env;
10888                 ARMCPU *cpu = arm_env_get_cpu(env);
10889                 uint32_t vq, old_vq;
10890 
10891                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10892                 vq = MAX(arg2 / 16, 1);
10893                 vq = MIN(vq, cpu->sve_max_vq);
10894 
10895                 if (vq < old_vq) {
10896                     aarch64_sve_narrow_vq(env, vq);
10897                 }
10898                 env->vfp.zcr_el[1] = vq - 1;
10899                 ret = vq * 16;
10900             }
10901             break;
10902         case TARGET_PR_SVE_GET_VL:
10903             ret = -TARGET_EINVAL;
10904             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10905                 CPUARMState *env = cpu_env;
10906                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10907             }
10908             break;
10909 #endif /* AARCH64 */
10910         case PR_GET_SECCOMP:
10911         case PR_SET_SECCOMP:
10912             /* Disable seccomp to prevent the target from disabling
10913              * syscalls that we need. */
10914             ret = -TARGET_EINVAL;
10915             break;
10916         default:
10917             /* Most prctl options have no pointer arguments */
10918             ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10919             break;
10920         }
10921         break;
10922 #ifdef TARGET_NR_arch_prctl
10923     case TARGET_NR_arch_prctl:
10924 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10925         ret = do_arch_prctl(cpu_env, arg1, arg2);
10926         break;
10927 #else
10928         goto unimplemented;
10929 #endif
10930 #endif
10931 #ifdef TARGET_NR_pread64
10932     case TARGET_NR_pread64:
10933         if (regpairs_aligned(cpu_env, num)) {
10934             arg4 = arg5;
10935             arg5 = arg6;
10936         }
10937         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10938             goto efault;
10939         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10940         unlock_user(p, arg2, ret);
10941         break;
10942     case TARGET_NR_pwrite64:
10943         if (regpairs_aligned(cpu_env, num)) {
10944             arg4 = arg5;
10945             arg5 = arg6;
10946         }
10947         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10948             goto efault;
10949         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10950         unlock_user(p, arg2, 0);
10951         break;
10952 #endif
10953     case TARGET_NR_getcwd:
10954         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10955             goto efault;
10956         ret = get_errno(sys_getcwd1(p, arg2));
10957         unlock_user(p, arg1, ret);
10958         break;
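          /* capget/capset share their marshalling: convert the capability
           * header, then one or two cap_data structs depending on whether
           * the guest uses the original v1 ABI or a newer version.  */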
10959     case TARGET_NR_capget:
10960     case TARGET_NR_capset:
10961     {
10962         struct target_user_cap_header *target_header;
10963         struct target_user_cap_data *target_data = NULL;
10964         struct __user_cap_header_struct header;
10965         struct __user_cap_data_struct data[2];
10966         struct __user_cap_data_struct *dataptr = NULL;
10967         int i, target_datalen;
10968         int data_items = 1;
10969 
10970         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10971             goto efault;
10972         }
10973         header.version = tswap32(target_header->version);
10974         header.pid = tswap32(target_header->pid);
10975 
10976         if (header.version != _LINUX_CAPABILITY_VERSION) {
10977             /* Versions 2 and up take a pointer to two user_data structs */
10978             data_items = 2;
10979         }
10980 
10981         target_datalen = sizeof(*target_data) * data_items;
10982 
10983         if (arg2) {
10984             if (num == TARGET_NR_capget) {
10985                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10986             } else {
10987                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10988             }
10989             if (!target_data) {
10990                 unlock_user_struct(target_header, arg1, 0);
10991                 goto efault;
10992             }
10993 
10994             if (num == TARGET_NR_capset) {
10995                 for (i = 0; i < data_items; i++) {
10996                     data[i].effective = tswap32(target_data[i].effective);
10997                     data[i].permitted = tswap32(target_data[i].permitted);
10998                     data[i].inheritable = tswap32(target_data[i].inheritable);
10999                 }
11000             }
11001 
11002             dataptr = data;
11003         }
11004 
11005         if (num == TARGET_NR_capget) {
11006             ret = get_errno(capget(&header, dataptr));
11007         } else {
11008             ret = get_errno(capset(&header, dataptr));
11009         }
11010 
11011         /* The kernel always updates version for both capget and capset */
11012         target_header->version = tswap32(header.version);
11013         unlock_user_struct(target_header, arg1, 1);
11014 
11015         if (arg2) {
11016             if (num == TARGET_NR_capget) {
11017                 for (i = 0; i < data_items; i++) {
11018                     target_data[i].effective = tswap32(data[i].effective);
11019                     target_data[i].permitted = tswap32(data[i].permitted);
11020                     target_data[i].inheritable = tswap32(data[i].inheritable);
11021                 }
11022                 unlock_user(target_data, arg2, target_datalen);
11023             } else {
11024                 unlock_user(target_data, arg2, 0);
11025             }
11026         }
11027         break;
11028     }
11029     case TARGET_NR_sigaltstack:
11030         ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
11031         break;
11032 
11033 #ifdef CONFIG_SENDFILE
11034 #ifdef TARGET_NR_sendfile
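          /* sendfile/sendfile64 differ only in the width of the offset the
           * guest passes in; both end up calling the host sendfile().  */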
11035     case TARGET_NR_sendfile:
11036     {
11037         off_t *offp = NULL;
11038         off_t off;
11039         if (arg3) {
11040             ret = get_user_sal(off, arg3);
11041             if (is_error(ret)) {
11042                 break;
11043             }
11044             offp = &off;
11045         }
11046         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11047         if (!is_error(ret) && arg3) {
11048             abi_long ret2 = put_user_sal(off, arg3);
11049             if (is_error(ret2)) {
11050                 ret = ret2;
11051             }
11052         }
11053         break;
11054     }
11055 #endif
11056 #ifdef TARGET_NR_sendfile64
11057     case TARGET_NR_sendfile64:
11058     {
11059         off_t *offp = NULL;
11060         off_t off;
11061         if (arg3) {
11062             ret = get_user_s64(off, arg3);
11063             if (is_error(ret)) {
11064                 break;
11065             }
11066             offp = &off;
11067         }
11068         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11069         if (!is_error(ret) && arg3) {
11070             abi_long ret2 = put_user_s64(off, arg3);
11071             if (is_error(ret2)) {
11072                 ret = ret2;
11073             }
11074         }
11075         break;
11076     }
11077 #endif
11078 #else
11079     case TARGET_NR_sendfile:
11080 #ifdef TARGET_NR_sendfile64
11081     case TARGET_NR_sendfile64:
11082 #endif
11083         goto unimplemented;
11084 #endif
11085 
11086 #ifdef TARGET_NR_getpmsg
11087     case TARGET_NR_getpmsg:
11088         goto unimplemented;
11089 #endif
11090 #ifdef TARGET_NR_putpmsg
11091     case TARGET_NR_putpmsg:
11092         goto unimplemented;
11093 #endif
11094 #ifdef TARGET_NR_vfork
11095     case TARGET_NR_vfork:
11096         ret = get_errno(do_fork(cpu_env,
11097                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11098                         0, 0, 0, 0));
11099         break;
11100 #endif
11101 #ifdef TARGET_NR_ugetrlimit
11102     case TARGET_NR_ugetrlimit:
11103     {
11104         struct rlimit rlim;
11105         int resource = target_to_host_resource(arg1);
11106         ret = get_errno(getrlimit(resource, &rlim));
11107         if (!is_error(ret)) {
11108             struct target_rlimit *target_rlim;
11109             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11110                 goto efault;
11111             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11112             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11113             unlock_user_struct(target_rlim, arg2, 1);
11114         }
11115         break;
11116     }
11117 #endif
11118 #ifdef TARGET_NR_truncate64
11119     case TARGET_NR_truncate64:
11120         if (!(p = lock_user_string(arg1)))
11121             goto efault;
11122         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11123         unlock_user(p, arg1, 0);
11124         break;
11125 #endif
11126 #ifdef TARGET_NR_ftruncate64
11127     case TARGET_NR_ftruncate64:
11128         ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11129         break;
11130 #endif
11131 #ifdef TARGET_NR_stat64
11132     case TARGET_NR_stat64:
11133         if (!(p = lock_user_string(arg1)))
11134             goto efault;
11135         ret = get_errno(stat(path(p), &st));
11136         unlock_user(p, arg1, 0);
11137         if (!is_error(ret))
11138             ret = host_to_target_stat64(cpu_env, arg2, &st);
11139         break;
11140 #endif
11141 #ifdef TARGET_NR_lstat64
11142     case TARGET_NR_lstat64:
11143         if (!(p = lock_user_string(arg1)))
11144             goto efault;
11145         ret = get_errno(lstat(path(p), &st));
11146         unlock_user(p, arg1, 0);
11147         if (!is_error(ret))
11148             ret = host_to_target_stat64(cpu_env, arg2, &st);
11149         break;
11150 #endif
11151 #ifdef TARGET_NR_fstat64
11152     case TARGET_NR_fstat64:
11153         ret = get_errno(fstat(arg1, &st));
11154         if (!is_error(ret))
11155             ret = host_to_target_stat64(cpu_env, arg2, &st);
11156         break;
11157 #endif
11158 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11159 #ifdef TARGET_NR_fstatat64
11160     case TARGET_NR_fstatat64:
11161 #endif
11162 #ifdef TARGET_NR_newfstatat
11163     case TARGET_NR_newfstatat:
11164 #endif
11165         if (!(p = lock_user_string(arg2)))
11166             goto efault;
11167         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11168         if (!is_error(ret))
11169             ret = host_to_target_stat64(cpu_env, arg3, &st);
11170         break;
11171 #endif
11172 #ifdef TARGET_NR_lchown
11173     case TARGET_NR_lchown:
11174         if (!(p = lock_user_string(arg1)))
11175             goto efault;
11176         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11177         unlock_user(p, arg1, 0);
11178         break;
11179 #endif
11180 #ifdef TARGET_NR_getuid
11181     case TARGET_NR_getuid:
11182         ret = get_errno(high2lowuid(getuid()));
11183         break;
11184 #endif
11185 #ifdef TARGET_NR_getgid
11186     case TARGET_NR_getgid:
11187         ret = get_errno(high2lowgid(getgid()));
11188         break;
11189 #endif
11190 #ifdef TARGET_NR_geteuid
11191     case TARGET_NR_geteuid:
11192         ret = get_errno(high2lowuid(geteuid()));
11193         break;
11194 #endif
11195 #ifdef TARGET_NR_getegid
11196     case TARGET_NR_getegid:
11197         ret = get_errno(high2lowgid(getegid()));
11198         break;
11199 #endif
11200     case TARGET_NR_setreuid:
11201         ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11202         break;
11203     case TARGET_NR_setregid:
11204         ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11205         break;
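          /* getgroups/setgroups: group IDs are converted entry by entry with
           * tswapid() plus the high2low/low2high helpers, since target_id may
           * be narrower than the host gid_t.  */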
11206     case TARGET_NR_getgroups:
11207         {
11208             int gidsetsize = arg1;
11209             target_id *target_grouplist;
11210             gid_t *grouplist;
11211             int i;
11212 
11213             grouplist = alloca(gidsetsize * sizeof(gid_t));
11214             ret = get_errno(getgroups(gidsetsize, grouplist));
11215             if (gidsetsize == 0)
11216                 break;
11217             if (!is_error(ret)) {
11218                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11219                 if (!target_grouplist)
11220                     goto efault;
11221                 for(i = 0;i < ret; i++)
11222                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11223                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11224             }
11225         }
11226         break;
11227     case TARGET_NR_setgroups:
11228         {
11229             int gidsetsize = arg1;
11230             target_id *target_grouplist;
11231             gid_t *grouplist = NULL;
11232             int i;
11233             if (gidsetsize) {
11234                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11235                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11236                 if (!target_grouplist) {
11237                     ret = -TARGET_EFAULT;
11238                     goto fail;
11239                 }
11240                 for (i = 0; i < gidsetsize; i++) {
11241                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11242                 }
11243                 unlock_user(target_grouplist, arg2, 0);
11244             }
11245             ret = get_errno(setgroups(gidsetsize, grouplist));
11246         }
11247         break;
11248     case TARGET_NR_fchown:
11249         ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11250         break;
11251 #if defined(TARGET_NR_fchownat)
11252     case TARGET_NR_fchownat:
11253         if (!(p = lock_user_string(arg2)))
11254             goto efault;
11255         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11256                                  low2highgid(arg4), arg5));
11257         unlock_user(p, arg2, 0);
11258         break;
11259 #endif
11260 #ifdef TARGET_NR_setresuid
11261     case TARGET_NR_setresuid:
11262         ret = get_errno(sys_setresuid(low2highuid(arg1),
11263                                       low2highuid(arg2),
11264                                       low2highuid(arg3)));
11265         break;
11266 #endif
11267 #ifdef TARGET_NR_getresuid
11268     case TARGET_NR_getresuid:
11269         {
11270             uid_t ruid, euid, suid;
11271             ret = get_errno(getresuid(&ruid, &euid, &suid));
11272             if (!is_error(ret)) {
11273                 if (put_user_id(high2lowuid(ruid), arg1)
11274                     || put_user_id(high2lowuid(euid), arg2)
11275                     || put_user_id(high2lowuid(suid), arg3))
11276                     goto efault;
11277             }
11278         }
11279         break;
11280 #endif
11281 #ifdef TARGET_NR_setresgid
11282     case TARGET_NR_setresgid:
11283         ret = get_errno(sys_setresgid(low2highgid(arg1),
11284                                       low2highgid(arg2),
11285                                       low2highgid(arg3)));
11286         break;
11287 #endif
11288 #ifdef TARGET_NR_getresgid
11289     case TARGET_NR_getresgid:
11290         {
11291             gid_t rgid, egid, sgid;
11292             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11293             if (!is_error(ret)) {
11294                 if (put_user_id(high2lowgid(rgid), arg1)
11295                     || put_user_id(high2lowgid(egid), arg2)
11296                     || put_user_id(high2lowgid(sgid), arg3))
11297                     goto efault;
11298             }
11299         }
11300         break;
11301 #endif
11302 #ifdef TARGET_NR_chown
11303     case TARGET_NR_chown:
11304         if (!(p = lock_user_string(arg1)))
11305             goto efault;
11306         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11307         unlock_user(p, arg1, 0);
11308         break;
11309 #endif
11310     case TARGET_NR_setuid:
11311         ret = get_errno(sys_setuid(low2highuid(arg1)));
11312         break;
11313     case TARGET_NR_setgid:
11314         ret = get_errno(sys_setgid(low2highgid(arg1)));
11315         break;
11316     case TARGET_NR_setfsuid:
11317         ret = get_errno(setfsuid(arg1));
11318         break;
11319     case TARGET_NR_setfsgid:
11320         ret = get_errno(setfsgid(arg1));
11321         break;
11322 
11323 #ifdef TARGET_NR_lchown32
11324     case TARGET_NR_lchown32:
11325         if (!(p = lock_user_string(arg1)))
11326             goto efault;
11327         ret = get_errno(lchown(p, arg2, arg3));
11328         unlock_user(p, arg1, 0);
11329         break;
11330 #endif
11331 #ifdef TARGET_NR_getuid32
11332     case TARGET_NR_getuid32:
11333         ret = get_errno(getuid());
11334         break;
11335 #endif
11336 
11337 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11338    /* Alpha specific */
11339     case TARGET_NR_getxuid:
11340         {
11341             uid_t euid;
11342             euid = geteuid();
11343             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11344         }
11345         ret = get_errno(getuid());
11346         break;
11347 #endif
11348 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11349    /* Alpha specific */
11350     case TARGET_NR_getxgid:
11351         {
11352             gid_t egid;
11353             egid = getegid();
11354             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11355         }
11356         ret = get_errno(getgid());
11357         break;
11358 #endif
11359 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11360     /* Alpha specific */
11361     case TARGET_NR_osf_getsysinfo:
11362         ret = -TARGET_EOPNOTSUPP;
11363         switch (arg1) {
11364           case TARGET_GSI_IEEE_FP_CONTROL:
11365             {
11366                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11367 
11368                 /* Copied from linux ieee_fpcr_to_swcr.  */
11369                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11370                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11371                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11372                                         | SWCR_TRAP_ENABLE_DZE
11373                                         | SWCR_TRAP_ENABLE_OVF);
11374                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11375                                         | SWCR_TRAP_ENABLE_INE);
11376                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11377                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11378 
11379                 if (put_user_u64 (swcr, arg2))
11380                         goto efault;
11381                 ret = 0;
11382             }
11383             break;
11384 
11385           /* case GSI_IEEE_STATE_AT_SIGNAL:
11386              -- Not implemented in linux kernel.
11387              case GSI_UACPROC:
11388              -- Retrieves current unaligned access state; not much used.
11389              case GSI_PROC_TYPE:
11390              -- Retrieves implver information; surely not used.
11391              case GSI_GET_HWRPB:
11392              -- Grabs a copy of the HWRPB; surely not used.
11393           */
11394         }
11395         break;
11396 #endif
11397 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11398     /* Alpha specific */
11399     case TARGET_NR_osf_setsysinfo:
11400         ret = -TARGET_EOPNOTSUPP;
11401         switch (arg1) {
11402           case TARGET_SSI_IEEE_FP_CONTROL:
11403             {
11404                 uint64_t swcr, fpcr, orig_fpcr;
11405 
11406                 if (get_user_u64 (swcr, arg2)) {
11407                     goto efault;
11408                 }
11409                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11410                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11411 
11412                 /* Copied from linux ieee_swcr_to_fpcr.  */
11413                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11414                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11415                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11416                                   | SWCR_TRAP_ENABLE_DZE
11417                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11418                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11419                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11420                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11421                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11422 
11423                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11424                 ret = 0;
11425             }
11426             break;
11427 
11428           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11429             {
11430                 uint64_t exc, fpcr, orig_fpcr;
11431                 int si_code;
11432 
11433                 if (get_user_u64(exc, arg2)) {
11434                     goto efault;
11435                 }
11436 
11437                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11438 
11439                 /* We only add to the exception status here.  */
11440                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11441 
11442                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11443                 ret = 0;
11444 
11445                 /* Old exceptions are not signaled.  */
11446                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11447 
11448                 /* If any exceptions were set by this call
11449                    and are unmasked, send a signal.  */
11450                 si_code = 0;
11451                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11452                     si_code = TARGET_FPE_FLTRES;
11453                 }
11454                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11455                     si_code = TARGET_FPE_FLTUND;
11456                 }
11457                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11458                     si_code = TARGET_FPE_FLTOVF;
11459                 }
11460                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11461                     si_code = TARGET_FPE_FLTDIV;
11462                 }
11463                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11464                     si_code = TARGET_FPE_FLTINV;
11465                 }
11466                 if (si_code != 0) {
11467                     target_siginfo_t info;
11468                     info.si_signo = SIGFPE;
11469                     info.si_errno = 0;
11470                     info.si_code = si_code;
11471                     info._sifields._sigfault._addr
11472                         = ((CPUArchState *)cpu_env)->pc;
11473                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11474                                  QEMU_SI_FAULT, &info);
11475                 }
11476             }
11477             break;
11478 
11479           /* case SSI_NVPAIRS:
11480              -- Used with SSIN_UACPROC to enable unaligned accesses.
11481              case SSI_IEEE_STATE_AT_SIGNAL:
11482              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11483              -- Not implemented in linux kernel
11484           */
11485         }
11486         break;
11487 #endif
11488 #ifdef TARGET_NR_osf_sigprocmask
11489     /* Alpha specific.  */
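          /* osf_sigprocmask takes the mask as an immediate argument and
           * returns the previous mask in the syscall return value rather
           * than through a user pointer.  */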
11490     case TARGET_NR_osf_sigprocmask:
11491         {
11492             abi_ulong mask;
11493             int how;
11494             sigset_t set, oldset;
11495 
11496             switch(arg1) {
11497             case TARGET_SIG_BLOCK:
11498                 how = SIG_BLOCK;
11499                 break;
11500             case TARGET_SIG_UNBLOCK:
11501                 how = SIG_UNBLOCK;
11502                 break;
11503             case TARGET_SIG_SETMASK:
11504                 how = SIG_SETMASK;
11505                 break;
11506             default:
11507                 ret = -TARGET_EINVAL;
11508                 goto fail;
11509             }
11510             mask = arg2;
11511             target_to_host_old_sigset(&set, &mask);
11512             ret = do_sigprocmask(how, &set, &oldset);
11513             if (!ret) {
11514                 host_to_target_old_sigset(&mask, &oldset);
11515                 ret = mask;
11516             }
11517         }
11518         break;
11519 #endif
11520 
11521 #ifdef TARGET_NR_getgid32
11522     case TARGET_NR_getgid32:
11523         ret = get_errno(getgid());
11524         break;
11525 #endif
11526 #ifdef TARGET_NR_geteuid32
11527     case TARGET_NR_geteuid32:
11528         ret = get_errno(geteuid());
11529         break;
11530 #endif
11531 #ifdef TARGET_NR_getegid32
11532     case TARGET_NR_getegid32:
11533         ret = get_errno(getegid());
11534         break;
11535 #endif
11536 #ifdef TARGET_NR_setreuid32
11537     case TARGET_NR_setreuid32:
11538         ret = get_errno(setreuid(arg1, arg2));
11539         break;
11540 #endif
11541 #ifdef TARGET_NR_setregid32
11542     case TARGET_NR_setregid32:
11543         ret = get_errno(setregid(arg1, arg2));
11544         break;
11545 #endif
11546 #ifdef TARGET_NR_getgroups32
11547     case TARGET_NR_getgroups32:
11548         {
11549             int gidsetsize = arg1;
11550             uint32_t *target_grouplist;
11551             gid_t *grouplist;
11552             int i;
11553 
11554             grouplist = alloca(gidsetsize * sizeof(gid_t));
11555             ret = get_errno(getgroups(gidsetsize, grouplist));
11556             if (gidsetsize == 0)
11557                 break;
11558             if (!is_error(ret)) {
11559                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11560                 if (!target_grouplist) {
11561                     ret = -TARGET_EFAULT;
11562                     goto fail;
11563                 }
11564                 for(i = 0;i < ret; i++)
11565                     target_grouplist[i] = tswap32(grouplist[i]);
11566                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11567             }
11568         }
11569         break;
11570 #endif
11571 #ifdef TARGET_NR_setgroups32
11572     case TARGET_NR_setgroups32:
11573         {
11574             int gidsetsize = arg1;
11575             uint32_t *target_grouplist;
11576             gid_t *grouplist;
11577             int i;
11578 
11579             grouplist = alloca(gidsetsize * sizeof(gid_t));
11580             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11581             if (!target_grouplist) {
11582                 ret = -TARGET_EFAULT;
11583                 goto fail;
11584             }
11585             for(i = 0;i < gidsetsize; i++)
11586                 grouplist[i] = tswap32(target_grouplist[i]);
11587             unlock_user(target_grouplist, arg2, 0);
11588             ret = get_errno(setgroups(gidsetsize, grouplist));
11589         }
11590         break;
11591 #endif
11592 #ifdef TARGET_NR_fchown32
11593     case TARGET_NR_fchown32:
11594         ret = get_errno(fchown(arg1, arg2, arg3));
11595         break;
11596 #endif
11597 #ifdef TARGET_NR_setresuid32
11598     case TARGET_NR_setresuid32:
11599         ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11600         break;
11601 #endif
11602 #ifdef TARGET_NR_getresuid32
11603     case TARGET_NR_getresuid32:
11604         {
11605             uid_t ruid, euid, suid;
11606             ret = get_errno(getresuid(&ruid, &euid, &suid));
11607             if (!is_error(ret)) {
11608                 if (put_user_u32(ruid, arg1)
11609                     || put_user_u32(euid, arg2)
11610                     || put_user_u32(suid, arg3))
11611                     goto efault;
11612             }
11613         }
11614         break;
11615 #endif
11616 #ifdef TARGET_NR_setresgid32
11617     case TARGET_NR_setresgid32:
11618         ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11619         break;
11620 #endif
11621 #ifdef TARGET_NR_getresgid32
11622     case TARGET_NR_getresgid32:
11623         {
11624             gid_t rgid, egid, sgid;
11625             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11626             if (!is_error(ret)) {
11627                 if (put_user_u32(rgid, arg1)
11628                     || put_user_u32(egid, arg2)
11629                     || put_user_u32(sgid, arg3))
11630                     goto efault;
11631             }
11632         }
11633         break;
11634 #endif
11635 #ifdef TARGET_NR_chown32
11636     case TARGET_NR_chown32:
11637         if (!(p = lock_user_string(arg1)))
11638             goto efault;
11639         ret = get_errno(chown(p, arg2, arg3));
11640         unlock_user(p, arg1, 0);
11641         break;
11642 #endif
11643 #ifdef TARGET_NR_setuid32
11644     case TARGET_NR_setuid32:
11645         ret = get_errno(sys_setuid(arg1));
11646         break;
11647 #endif
11648 #ifdef TARGET_NR_setgid32
11649     case TARGET_NR_setgid32:
11650         ret = get_errno(sys_setgid(arg1));
11651         break;
11652 #endif
11653 #ifdef TARGET_NR_setfsuid32
11654     case TARGET_NR_setfsuid32:
11655         ret = get_errno(setfsuid(arg1));
11656         break;
11657 #endif
11658 #ifdef TARGET_NR_setfsgid32
11659     case TARGET_NR_setfsgid32:
11660         ret = get_errno(setfsgid(arg1));
11661         break;
11662 #endif
11663 
11664     case TARGET_NR_pivot_root:
11665         goto unimplemented;
11666 #ifdef TARGET_NR_mincore
11667     case TARGET_NR_mincore:
11668         {
11669             void *a;
11670             ret = -TARGET_ENOMEM;
11671             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11672             if (!a) {
11673                 goto fail;
11674             }
11675             ret = -TARGET_EFAULT;
11676             p = lock_user_string(arg3);
11677             if (!p) {
11678                 goto mincore_fail;
11679             }
11680             ret = get_errno(mincore(a, arg2, p));
11681             unlock_user(p, arg3, ret);
11682             mincore_fail:
11683             unlock_user(a, arg1, 0);
11684         }
11685         break;
11686 #endif
11687 #ifdef TARGET_NR_arm_fadvise64_64
11688     case TARGET_NR_arm_fadvise64_64:
11689         /* arm_fadvise64_64 looks like fadvise64_64 but
11690          * with different argument order: fd, advice, offset, len
11691          * rather than the usual fd, offset, len, advice.
11692          * Note that offset and len are both 64-bit so appear as
11693          * pairs of 32-bit registers.
11694          */
11695         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11696                             target_offset64(arg5, arg6), arg2);
11697         ret = -host_to_target_errno(ret);
11698         break;
11699 #endif
11700 
11701 #if TARGET_ABI_BITS == 32
11702 
11703 #ifdef TARGET_NR_fadvise64_64
11704     case TARGET_NR_fadvise64_64:
11705 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11706         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11707         ret = arg2;
11708         arg2 = arg3;
11709         arg3 = arg4;
11710         arg4 = arg5;
11711         arg5 = arg6;
11712         arg6 = ret;
11713 #else
11714         /* 6 args: fd, offset (high, low), len (high, low), advice */
11715         if (regpairs_aligned(cpu_env, num)) {
11716             /* offset is in (3,4), len in (5,6) and advice in 7 */
11717             arg2 = arg3;
11718             arg3 = arg4;
11719             arg4 = arg5;
11720             arg5 = arg6;
11721             arg6 = arg7;
11722         }
11723 #endif
11724         ret = -host_to_target_errno(posix_fadvise(arg1,
11725                                                   target_offset64(arg2, arg3),
11726                                                   target_offset64(arg4, arg5),
11727                                                   arg6));
11728         break;
11729 #endif
11730 
11731 #ifdef TARGET_NR_fadvise64
11732     case TARGET_NR_fadvise64:
11733         /* 5 args: fd, offset (high, low), len, advice */
11734         if (regpairs_aligned(cpu_env, num)) {
11735             /* offset is in (3,4), len in 5 and advice in 6 */
11736             arg2 = arg3;
11737             arg3 = arg4;
11738             arg4 = arg5;
11739             arg5 = arg6;
11740         }
11741         ret = -host_to_target_errno(posix_fadvise(arg1,
11742                                                   target_offset64(arg2, arg3),
11743                                                   arg4, arg5));
11744         break;
11745 #endif
11746 
11747 #else /* not a 32-bit ABI */
11748 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11749 #ifdef TARGET_NR_fadvise64_64
11750     case TARGET_NR_fadvise64_64:
11751 #endif
11752 #ifdef TARGET_NR_fadvise64
11753     case TARGET_NR_fadvise64:
11754 #endif
11755 #ifdef TARGET_S390X
11756         switch (arg4) {
11757         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11758         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11759         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11760         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11761         default: break;
11762         }
11763 #endif
11764         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11765         break;
11766 #endif
11767 #endif /* end of 64-bit ABI fadvise handling */
11768 
11769 #ifdef TARGET_NR_madvise
11770     case TARGET_NR_madvise:
11771         /* A straight passthrough may not be safe because qemu sometimes
11772            turns private file-backed mappings into anonymous mappings.
11773            This will break MADV_DONTNEED.
11774            This is a hint, so ignoring and returning success is ok.  */
11775         ret = get_errno(0);
11776         break;
11777 #endif
11778 #if TARGET_ABI_BITS == 32
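          /* fcntl64 on 32-bit ABIs: the F_*LK64 commands need struct flock64
           * converted between guest and host layouts; ARM OABI guests use a
           * different structure layout from EABI, hence the alternate copy
           * helpers chosen below.  */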
11779     case TARGET_NR_fcntl64:
11780     {
11781         int cmd;
11782         struct flock64 fl;
11783         from_flock64_fn *copyfrom = copy_from_user_flock64;
11784         to_flock64_fn *copyto = copy_to_user_flock64;
11785 
11786 #ifdef TARGET_ARM
11787         if (!((CPUARMState *)cpu_env)->eabi) {
11788             copyfrom = copy_from_user_oabi_flock64;
11789             copyto = copy_to_user_oabi_flock64;
11790         }
11791 #endif
11792 
11793         cmd = target_to_host_fcntl_cmd(arg2);
11794         if (cmd == -TARGET_EINVAL) {
11795             ret = cmd;
11796             break;
11797         }
11798 
11799         switch(arg2) {
11800         case TARGET_F_GETLK64:
11801             ret = copyfrom(&fl, arg3);
11802             if (ret) {
11803                 break;
11804             }
11805             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11806             if (ret == 0) {
11807                 ret = copyto(arg3, &fl);
11808             }
11809             break;
11810 
11811         case TARGET_F_SETLK64:
11812         case TARGET_F_SETLKW64:
11813             ret = copyfrom(&fl, arg3);
11814             if (ret) {
11815                 break;
11816             }
11817             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11818             break;
11819         default:
11820             ret = do_fcntl(arg1, arg2, arg3);
11821             break;
11822         }
11823         break;
11824     }
11825 #endif
11826 #ifdef TARGET_NR_cacheflush
11827     case TARGET_NR_cacheflush:
11828         /* self-modifying code is handled automatically, so nothing needed */
11829         ret = 0;
11830         break;
11831 #endif
11832 #ifdef TARGET_NR_security
11833     case TARGET_NR_security:
11834         goto unimplemented;
11835 #endif
11836 #ifdef TARGET_NR_getpagesize
11837     case TARGET_NR_getpagesize:
11838         ret = TARGET_PAGE_SIZE;
11839         break;
11840 #endif
11841     case TARGET_NR_gettid:
11842         ret = get_errno(gettid());
11843         break;
11844 #ifdef TARGET_NR_readahead
11845     case TARGET_NR_readahead:
11846 #if TARGET_ABI_BITS == 32
11847         if (regpairs_aligned(cpu_env, num)) {
11848             arg2 = arg3;
11849             arg3 = arg4;
11850             arg4 = arg5;
11851         }
11852         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11853 #else
11854         ret = get_errno(readahead(arg1, arg2, arg3));
11855 #endif
11856         break;
11857 #endif
11858 #ifdef CONFIG_ATTR
11859 #ifdef TARGET_NR_setxattr
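          /* Extended attributes: names and values are opaque byte strings,
           * so only the guest pointers need locking; no byte swapping is
           * needed for the payload.  */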
11860     case TARGET_NR_listxattr:
11861     case TARGET_NR_llistxattr:
11862     {
11863         void *p, *b = 0;
11864         if (arg2) {
11865             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11866             if (!b) {
11867                 ret = -TARGET_EFAULT;
11868                 break;
11869             }
11870         }
11871         p = lock_user_string(arg1);
11872         if (p) {
11873             if (num == TARGET_NR_listxattr) {
11874                 ret = get_errno(listxattr(p, b, arg3));
11875             } else {
11876                 ret = get_errno(llistxattr(p, b, arg3));
11877             }
11878         } else {
11879             ret = -TARGET_EFAULT;
11880         }
11881         unlock_user(p, arg1, 0);
11882         unlock_user(b, arg2, arg3);
11883         break;
11884     }
11885     case TARGET_NR_flistxattr:
11886     {
11887         void *b = 0;
11888         if (arg2) {
11889             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11890             if (!b) {
11891                 ret = -TARGET_EFAULT;
11892                 break;
11893             }
11894         }
11895         ret = get_errno(flistxattr(arg1, b, arg3));
11896         unlock_user(b, arg2, arg3);
11897         break;
11898     }
11899     case TARGET_NR_setxattr:
11900     case TARGET_NR_lsetxattr:
11901         {
11902             void *p, *n, *v = 0;
11903             if (arg3) {
11904                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11905                 if (!v) {
11906                     ret = -TARGET_EFAULT;
11907                     break;
11908                 }
11909             }
11910             p = lock_user_string(arg1);
11911             n = lock_user_string(arg2);
11912             if (p && n) {
11913                 if (num == TARGET_NR_setxattr) {
11914                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11915                 } else {
11916                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11917                 }
11918             } else {
11919                 ret = -TARGET_EFAULT;
11920             }
11921             unlock_user(p, arg1, 0);
11922             unlock_user(n, arg2, 0);
11923             unlock_user(v, arg3, 0);
11924         }
11925         break;
11926     case TARGET_NR_fsetxattr:
11927         {
11928             void *n, *v = 0;
11929             if (arg3) {
11930                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11931                 if (!v) {
11932                     ret = -TARGET_EFAULT;
11933                     break;
11934                 }
11935             }
11936             n = lock_user_string(arg2);
11937             if (n) {
11938                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11939             } else {
11940                 ret = -TARGET_EFAULT;
11941             }
11942             unlock_user(n, arg2, 0);
11943             unlock_user(v, arg3, 0);
11944         }
11945         break;
11946     case TARGET_NR_getxattr:
11947     case TARGET_NR_lgetxattr:
11948         {
11949             void *p, *n, *v = 0;
11950             if (arg3) {
11951                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11952                 if (!v) {
11953                     ret = -TARGET_EFAULT;
11954                     break;
11955                 }
11956             }
11957             p = lock_user_string(arg1);
11958             n = lock_user_string(arg2);
11959             if (p && n) {
11960                 if (num == TARGET_NR_getxattr) {
11961                     ret = get_errno(getxattr(p, n, v, arg4));
11962                 } else {
11963                     ret = get_errno(lgetxattr(p, n, v, arg4));
11964                 }
11965             } else {
11966                 ret = -TARGET_EFAULT;
11967             }
11968             unlock_user(p, arg1, 0);
11969             unlock_user(n, arg2, 0);
11970             unlock_user(v, arg3, arg4);
11971         }
11972         break;
11973     case TARGET_NR_fgetxattr:
11974         {
11975             void *n, *v = 0;
11976             if (arg3) {
11977                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11978                 if (!v) {
11979                     ret = -TARGET_EFAULT;
11980                     break;
11981                 }
11982             }
11983             n = lock_user_string(arg2);
11984             if (n) {
11985                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11986             } else {
11987                 ret = -TARGET_EFAULT;
11988             }
11989             unlock_user(n, arg2, 0);
11990             unlock_user(v, arg3, arg4);
11991         }
11992         break;
11993     case TARGET_NR_removexattr:
11994     case TARGET_NR_lremovexattr:
11995         {
11996             void *p, *n;
11997             p = lock_user_string(arg1);
11998             n = lock_user_string(arg2);
11999             if (p && n) {
12000                 if (num == TARGET_NR_removexattr) {
12001                     ret = get_errno(removexattr(p, n));
12002                 } else {
12003                     ret = get_errno(lremovexattr(p, n));
12004                 }
12005             } else {
12006                 ret = -TARGET_EFAULT;
12007             }
12008             unlock_user(p, arg1, 0);
12009             unlock_user(n, arg2, 0);
12010         }
12011         break;
12012     case TARGET_NR_fremovexattr:
12013         {
12014             void *n;
12015             n = lock_user_string(arg2);
12016             if (n) {
12017                 ret = get_errno(fremovexattr(arg1, n));
12018             } else {
12019                 ret = -TARGET_EFAULT;
12020             }
12021             unlock_user(n, arg2, 0);
12022         }
12023         break;
12024 #endif
12025 #endif /* CONFIG_ATTR */
12026 #ifdef TARGET_NR_set_thread_area
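          /* set_thread_area: each target keeps the TLS/thread pointer in its
           * own per-CPU location, so this is handled per architecture.  */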
12027     case TARGET_NR_set_thread_area:
12028 #if defined(TARGET_MIPS)
12029       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12030       ret = 0;
12031       break;
12032 #elif defined(TARGET_CRIS)
12033       if (arg1 & 0xff)
12034           ret = -TARGET_EINVAL;
12035       else {
12036           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12037           ret = 0;
12038       }
12039       break;
12040 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12041       ret = do_set_thread_area(cpu_env, arg1);
12042       break;
12043 #elif defined(TARGET_M68K)
12044       {
12045           TaskState *ts = cpu->opaque;
12046           ts->tp_value = arg1;
12047           ret = 0;
12048           break;
12049       }
12050 #else
12051       goto unimplemented_nowarn;
12052 #endif
12053 #endif
12054 #ifdef TARGET_NR_get_thread_area
12055     case TARGET_NR_get_thread_area:
12056 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12057         ret = do_get_thread_area(cpu_env, arg1);
12058         break;
12059 #elif defined(TARGET_M68K)
12060         {
12061             TaskState *ts = cpu->opaque;
12062             ret = ts->tp_value;
12063             break;
12064         }
12065 #else
12066         goto unimplemented_nowarn;
12067 #endif
12068 #endif
12069 #ifdef TARGET_NR_getdomainname
12070     case TARGET_NR_getdomainname:
12071         goto unimplemented_nowarn;
12072 #endif
12073 
12074 #ifdef TARGET_NR_clock_settime
12075     case TARGET_NR_clock_settime:
12076     {
12077         struct timespec ts;
12078 
12079         ret = target_to_host_timespec(&ts, arg2);
12080         if (!is_error(ret)) {
12081             ret = get_errno(clock_settime(arg1, &ts));
12082         }
12083         break;
12084     }
12085 #endif
12086 #ifdef TARGET_NR_clock_gettime
12087     case TARGET_NR_clock_gettime:
12088     {
12089         struct timespec ts;
12090         ret = get_errno(clock_gettime(arg1, &ts));
12091         if (!is_error(ret)) {
12092             ret = host_to_target_timespec(arg2, &ts);
12093         }
12094         break;
12095     }
12096 #endif
12097 #ifdef TARGET_NR_clock_getres
12098     case TARGET_NR_clock_getres:
12099     {
12100         struct timespec ts;
12101         ret = get_errno(clock_getres(arg1, &ts));
12102         if (!is_error(ret)) {
12103             host_to_target_timespec(arg2, &ts);
12104         }
12105         break;
12106     }
12107 #endif
12108 #ifdef TARGET_NR_clock_nanosleep
12109     case TARGET_NR_clock_nanosleep:
12110     {
12111         struct timespec ts;
12112         target_to_host_timespec(&ts, arg3);
12113         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12114                                              &ts, arg4 ? &ts : NULL));
12115         if (arg4)
12116             host_to_target_timespec(arg4, &ts);
12117 
12118 #if defined(TARGET_PPC)
12119         /* clock_nanosleep is odd in that it returns positive errno values.
12120          * On PPC, CR0 bit 3 should be set in such a situation. */
12121         if (ret && ret != -TARGET_ERESTARTSYS) {
12122             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12123         }
12124 #endif
12125         break;
12126     }
12127 #endif
12128 
12129 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12130     case TARGET_NR_set_tid_address:
12131         ret = get_errno(set_tid_address((int *)g2h(arg1)));
12132         break;
12133 #endif
12134 
12135     case TARGET_NR_tkill:
12136         ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12137         break;
12138 
12139     case TARGET_NR_tgkill:
12140         ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12141                         target_to_host_signal(arg3)));
12142         break;
12143 
12144 #ifdef TARGET_NR_set_robust_list
12145     case TARGET_NR_set_robust_list:
12146     case TARGET_NR_get_robust_list:
12147         /* The ABI for supporting robust futexes has userspace pass
12148          * the kernel a pointer to a linked list which is updated by
12149          * userspace after the syscall; the list is walked by the kernel
12150          * when the thread exits. Since the linked list in QEMU guest
12151          * memory isn't a valid linked list for the host and we have
12152          * no way to reliably intercept the thread-death event, we can't
12153          * support these. Silently return ENOSYS so that guest userspace
12154          * falls back to a non-robust futex implementation (which should
12155          * be OK except in the corner case of the guest crashing while
12156          * holding a mutex that is shared with another process via
12157          * shared memory).
12158          */
12159         goto unimplemented_nowarn;
12160 #endif
12161 
12162 #if defined(TARGET_NR_utimensat)
12163     case TARGET_NR_utimensat:
12164         {
12165             struct timespec *tsp, ts[2];
12166             if (!arg3) {
12167                 tsp = NULL;
12168             } else {
12169                 target_to_host_timespec(ts, arg3);
12170                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12171                 tsp = ts;
12172             }
12173             if (!arg2)
12174                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12175             else {
12176                 if (!(p = lock_user_string(arg2))) {
12177                     ret = -TARGET_EFAULT;
12178                     goto fail;
12179                 }
12180                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12181                 unlock_user(p, arg2, 0);
12182             }
12183         }
12184         break;
12185 #endif
12186     case TARGET_NR_futex:
12187         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12188         break;
12189 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12190     case TARGET_NR_inotify_init:
12191         ret = get_errno(sys_inotify_init());
12192         if (ret >= 0) {
12193             fd_trans_register(ret, &target_inotify_trans);
12194         }
12195         break;
12196 #endif
12197 #ifdef CONFIG_INOTIFY1
12198 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12199     case TARGET_NR_inotify_init1:
12200         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12201                                           fcntl_flags_tbl)));
12202         if (ret >= 0) {
12203             fd_trans_register(ret, &target_inotify_trans);
12204         }
12205         break;
12206 #endif
12207 #endif
12208 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12209     case TARGET_NR_inotify_add_watch:
12210         p = lock_user_string(arg2);
12211         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12212         unlock_user(p, arg2, 0);
12213         break;
12214 #endif
12215 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12216     case TARGET_NR_inotify_rm_watch:
12217         ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12218         break;
12219 #endif
12220 
12221 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
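          /* POSIX message queues: convert the optional mq_attr and the open
           * flag bits, then call straight into the host mqueue API.  */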
12222     case TARGET_NR_mq_open:
12223         {
12224             struct mq_attr posix_mq_attr;
12225             struct mq_attr *pposix_mq_attr;
12226             int host_flags;
12227 
12228             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12229             pposix_mq_attr = NULL;
12230             if (arg4) {
12231                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12232                     goto efault;
12233                 }
12234                 pposix_mq_attr = &posix_mq_attr;
12235             }
12236             p = lock_user_string(arg1 - 1);
12237             if (!p) {
12238                 goto efault;
12239             }
12240             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12241             unlock_user (p, arg1, 0);
12242         }
12243         break;
12244 
12245     case TARGET_NR_mq_unlink:
12246         p = lock_user_string(arg1 - 1);
12247         if (!p) {
12248             ret = -TARGET_EFAULT;
12249             break;
12250         }
12251         ret = get_errno(mq_unlink(p));
12252         unlock_user (p, arg1, 0);
12253         break;
12254 
12255     case TARGET_NR_mq_timedsend:
12256         {
12257             struct timespec ts;
12258 
12259             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12260             if (arg5 != 0) {
12261                 target_to_host_timespec(&ts, arg5);
12262                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12263                 host_to_target_timespec(arg5, &ts);
12264             } else {
12265                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12266             }
12267             unlock_user (p, arg2, arg3);
12268         }
12269         break;
12270 
12271     case TARGET_NR_mq_timedreceive:
12272         {
12273             struct timespec ts;
12274             unsigned int prio;
12275 
12276             p = lock_user (VERIFY_WRITE, arg2, arg3, 1);
12277             if (arg5 != 0) {
12278                 target_to_host_timespec(&ts, arg5);
12279                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12280                                                      &prio, &ts));
12281                 host_to_target_timespec(arg5, &ts);
12282             } else {
12283                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12284                                                      &prio, NULL));
12285             }
12286             unlock_user (p, arg2, arg3);
12287             if (arg4 != 0)
12288                 put_user_u32(prio, arg4);
12289         }
12290         break;
12291 
12292     /* Not implemented for now... */
12293 /*     case TARGET_NR_mq_notify: */
12294 /*         break; */
12295 
12296     case TARGET_NR_mq_getsetattr:
12297         {
12298             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12299             ret = 0;
12300             if (arg2 != 0) {
12301                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12302                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12303                                            &posix_mq_attr_out));
12304             } else if (arg3 != 0) {
12305                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12306             }
12307             if (ret == 0 && arg3 != 0) {
12308                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12309             }
12310         }
12311         break;
12312 #endif
12313 
12314 #ifdef CONFIG_SPLICE
12315 #ifdef TARGET_NR_tee
12316     case TARGET_NR_tee:
12317         {
12318             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12319         }
12320         break;
12321 #endif
12322 #ifdef TARGET_NR_splice
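          /* splice(2): the optional in/out offsets are 64-bit values in guest
           * memory; read them in, call the host splice(), and write back the
           * updated offsets.  */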
12323     case TARGET_NR_splice:
12324         {
12325             loff_t loff_in, loff_out;
12326             loff_t *ploff_in = NULL, *ploff_out = NULL;
12327             if (arg2) {
12328                 if (get_user_u64(loff_in, arg2)) {
12329                     goto efault;
12330                 }
12331                 ploff_in = &loff_in;
12332             }
12333             if (arg4) {
12334                 if (get_user_u64(loff_out, arg4)) {
12335                     goto efault;
12336                 }
12337                 ploff_out = &loff_out;
12338             }
12339             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12340             if (arg2) {
12341                 if (put_user_u64(loff_in, arg2)) {
12342                     goto efault;
12343                 }
12344             }
12345             if (arg4) {
12346                 if (put_user_u64(loff_out, arg4)) {
12347                     goto efault;
12348                 }
12349             }
12350         }
12351         break;
12352 #endif
12353 #ifdef TARGET_NR_vmsplice
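    /* vmsplice: lock the guest iovec array (and the buffers it describes)
     * into host memory before handing it to the host vmsplice().
     */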
12354     case TARGET_NR_vmsplice:
12355         {
12356             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12357             if (vec != NULL) {
12358                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12359                 unlock_iovec(vec, arg2, arg3, 0);
12360             } else {
12361                 ret = -host_to_target_errno(errno);
12362             }
12363         }
12364         break;
12365 #endif
12366 #endif /* CONFIG_SPLICE */
12367 #ifdef CONFIG_EVENTFD
12368 #if defined(TARGET_NR_eventfd)
12369     case TARGET_NR_eventfd:
12370         ret = get_errno(eventfd(arg1, 0));
12371         if (ret >= 0) {
12372             fd_trans_register(ret, &target_eventfd_trans);
12373         }
12374         break;
12375 #endif
12376 #if defined(TARGET_NR_eventfd2)
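    /* eventfd2: O_NONBLOCK and O_CLOEXEC may have different numeric values
     * on the guest and the host, so the flags are translated bit by bit.
     */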
12377     case TARGET_NR_eventfd2:
12378     {
12379         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12380         if (arg2 & TARGET_O_NONBLOCK) {
12381             host_flags |= O_NONBLOCK;
12382         }
12383         if (arg2 & TARGET_O_CLOEXEC) {
12384             host_flags |= O_CLOEXEC;
12385         }
12386         ret = get_errno(eventfd(arg1, host_flags));
12387         if (ret >= 0) {
12388             fd_trans_register(ret, &target_eventfd_trans);
12389         }
12390         break;
12391     }
12392 #endif
12393 #endif /* CONFIG_EVENTFD  */
12394 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
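    /* fallocate: on 32-bit ABIs the 64-bit offset and length arrive split
     * across register pairs and are reassembled with target_offset64().
     */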
12395     case TARGET_NR_fallocate:
12396 #if TARGET_ABI_BITS == 32
12397         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12398                                   target_offset64(arg5, arg6)));
12399 #else
12400         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12401 #endif
12402         break;
12403 #endif
12404 #if defined(CONFIG_SYNC_FILE_RANGE)
12405 #if defined(TARGET_NR_sync_file_range)
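    /* sync_file_range: as with fallocate, the 64-bit offset and length are
     * split across register pairs on 32-bit ABIs.  MIPS aligns 64-bit
     * arguments to even register pairs, so its offsets start at arg3.
     */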
12406     case TARGET_NR_sync_file_range:
12407 #if TARGET_ABI_BITS == 32
12408 #if defined(TARGET_MIPS)
12409         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12410                                         target_offset64(arg5, arg6), arg7));
12411 #else
12412         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12413                                         target_offset64(arg4, arg5), arg6));
12414 #endif /* !TARGET_MIPS */
12415 #else
12416         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12417 #endif
12418         break;
12419 #endif
12420 #if defined(TARGET_NR_sync_file_range2)
12421     case TARGET_NR_sync_file_range2:
12422         /* This is like sync_file_range but the arguments are reordered */
12423 #if TARGET_ABI_BITS == 32
12424         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12425                                         target_offset64(arg5, arg6), arg2));
12426 #else
12427         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12428 #endif
12429         break;
12430 #endif
12431 #endif
12432 #if defined(TARGET_NR_signalfd4)
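    /* signalfd4 and signalfd both funnel into the do_signalfd4() helper;
     * the plain signalfd variant simply passes zero flags.
     */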
12433     case TARGET_NR_signalfd4:
12434         ret = do_signalfd4(arg1, arg2, arg4);
12435         break;
12436 #endif
12437 #if defined(TARGET_NR_signalfd)
12438     case TARGET_NR_signalfd:
12439         ret = do_signalfd4(arg1, arg2, 0);
12440         break;
12441 #endif
12442 #if defined(CONFIG_EPOLL)
12443 #if defined(TARGET_NR_epoll_create)
12444     case TARGET_NR_epoll_create:
12445         ret = get_errno(epoll_create(arg1));
12446         break;
12447 #endif
12448 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12449     case TARGET_NR_epoll_create1:
12450         ret = get_errno(epoll_create1(arg1));
12451         break;
12452 #endif
12453 #if defined(TARGET_NR_epoll_ctl)
12454     case TARGET_NR_epoll_ctl:
12455     {
12456         struct epoll_event ep;
12457         struct epoll_event *epp = NULL;
12458         if (arg4) {
12459             struct target_epoll_event *target_ep;
12460             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12461                 goto efault;
12462             }
12463             ep.events = tswap32(target_ep->events);
12464             /* The epoll_data_t union is just opaque data to the kernel,
12465              * so we transfer all 64 bits across and need not worry what
12466              * actual data type it is.
12467              */
12468             ep.data.u64 = tswap64(target_ep->data.u64);
12469             unlock_user_struct(target_ep, arg4, 0);
12470             epp = &ep;
12471         }
12472         ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12473         break;
12474     }
12475 #endif
12476 
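    /* epoll_wait and epoll_pwait share one implementation: events are
     * received into a temporary host array and byte-swapped back into the
     * guest's target_epoll_event buffer on success.  For epoll_pwait the
     * optional guest sigset is converted to a host sigset_t first.
     */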
12477 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12478 #if defined(TARGET_NR_epoll_wait)
12479     case TARGET_NR_epoll_wait:
12480 #endif
12481 #if defined(TARGET_NR_epoll_pwait)
12482     case TARGET_NR_epoll_pwait:
12483 #endif
12484     {
12485         struct target_epoll_event *target_ep;
12486         struct epoll_event *ep;
12487         int epfd = arg1;
12488         int maxevents = arg3;
12489         int timeout = arg4;
12490 
12491         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12492             ret = -TARGET_EINVAL;
12493             break;
12494         }
12495 
12496         target_ep = lock_user(VERIFY_WRITE, arg2,
12497                               maxevents * sizeof(struct target_epoll_event), 1);
12498         if (!target_ep) {
12499             goto efault;
12500         }
12501 
12502         ep = g_try_new(struct epoll_event, maxevents);
12503         if (!ep) {
12504             unlock_user(target_ep, arg2, 0);
12505             ret = -TARGET_ENOMEM;
12506             break;
12507         }
12508 
12509         switch (num) {
12510 #if defined(TARGET_NR_epoll_pwait)
12511         case TARGET_NR_epoll_pwait:
12512         {
12513             target_sigset_t *target_set;
12514             sigset_t _set, *set = &_set;
12515 
12516             if (arg5) {
12517                 if (arg6 != sizeof(target_sigset_t)) {
12518                     ret = -TARGET_EINVAL;
12519                     break;
12520                 }
12521 
12522                 target_set = lock_user(VERIFY_READ, arg5,
12523                                        sizeof(target_sigset_t), 1);
12524                 if (!target_set) {
12525                     ret = -TARGET_EFAULT;
12526                     break;
12527                 }
12528                 target_to_host_sigset(set, target_set);
12529                 unlock_user(target_set, arg5, 0);
12530             } else {
12531                 set = NULL;
12532             }
12533 
12534             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12535                                              set, SIGSET_T_SIZE));
12536             break;
12537         }
12538 #endif
12539 #if defined(TARGET_NR_epoll_wait)
12540         case TARGET_NR_epoll_wait:
12541             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12542                                              NULL, 0));
12543             break;
12544 #endif
12545         default:
12546             ret = -TARGET_ENOSYS;
12547         }
12548         if (!is_error(ret)) {
12549             int i;
12550             for (i = 0; i < ret; i++) {
12551                 target_ep[i].events = tswap32(ep[i].events);
12552                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12553             }
12554             unlock_user(target_ep, arg2,
12555                         ret * sizeof(struct target_epoll_event));
12556         } else {
12557             unlock_user(target_ep, arg2, 0);
12558         }
12559         g_free(ep);
12560         break;
12561     }
12562 #endif
12563 #endif
12564 #ifdef TARGET_NR_prlimit64
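    /* prlimit64: target_rlimit64 always uses 64-bit fields, so the new and
     * old limits are converted with tswap64() in each direction.
     */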
12565     case TARGET_NR_prlimit64:
12566     {
12567         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12568         struct target_rlimit64 *target_rnew, *target_rold;
12569         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12570         int resource = target_to_host_resource(arg2);
12571         if (arg3) {
12572             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12573                 goto efault;
12574             }
12575             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12576             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12577             unlock_user_struct(target_rnew, arg3, 0);
12578             rnewp = &rnew;
12579         }
12580 
12581         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12582         if (!is_error(ret) && arg4) {
12583             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12584                 goto efault;
12585             }
12586             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12587             target_rold->rlim_max = tswap64(rold.rlim_max);
12588             unlock_user_struct(target_rold, arg4, 1);
12589         }
12590         break;
12591     }
12592 #endif
12593 #ifdef TARGET_NR_gethostname
12594     case TARGET_NR_gethostname:
12595     {
12596         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12597         if (name) {
12598             ret = get_errno(gethostname(name, arg2));
12599             unlock_user(name, arg1, arg2);
12600         } else {
12601             ret = -TARGET_EFAULT;
12602         }
12603         break;
12604     }
12605 #endif
12606 #ifdef TARGET_NR_atomic_cmpxchg_32
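    /* Kernel-assisted 32-bit compare-and-exchange: read the word at arg6,
     * store arg1 there if it equals arg2 and return the previous value.
     * A fault on the initial read raises SIGSEGV in the guest.
     */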
12607     case TARGET_NR_atomic_cmpxchg_32:
12608     {
12609         /* should use start_exclusive from main.c */
12610         abi_ulong mem_value;
12611         if (get_user_u32(mem_value, arg6)) {
12612             target_siginfo_t info;
12613             info.si_signo = SIGSEGV;
12614             info.si_errno = 0;
12615             info.si_code = TARGET_SEGV_MAPERR;
12616             info._sifields._sigfault._addr = arg6;
12617             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12618                          QEMU_SI_FAULT, &info);
12619             ret = 0xdeadbeef;
12620             break;
12621         }
12622         if (mem_value == arg2)
12623             put_user_u32(arg1, arg6);
12624         ret = mem_value;
12625         break;
12626     }
12627 #endif
12628 #ifdef TARGET_NR_atomic_barrier
12629     case TARGET_NR_atomic_barrier:
12630     {
12631         /* Like the kernel implementation and the QEMU ARM barrier, this is a no-op. */
12632         ret = 0;
12633         break;
12634     }
12635 #endif
12636 
12637 #ifdef TARGET_NR_timer_create
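    /* timer_create: host timers live in the g_posix_timers table; the guest
     * is handed an opaque id of the form TIMER_MAGIC | index rather than the
     * raw host timer_t.  The optional sigevent is converted to host layout.
     */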
12638     case TARGET_NR_timer_create:
12639     {
12640         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12641 
12642         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12643 
12644         int clkid = arg1;
12645         int timer_index = next_free_host_timer();
12646 
12647         if (timer_index < 0) {
12648             ret = -TARGET_EAGAIN;
12649         } else {
12650             timer_t *phtimer = g_posix_timers + timer_index;
12651 
12652             if (arg2) {
12653                 phost_sevp = &host_sevp;
12654                 ret = target_to_host_sigevent(phost_sevp, arg2);
12655                 if (ret != 0) {
12656                     break;
12657                 }
12658             }
12659 
12660             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12661             if (ret) {
12662                 phtimer = NULL;
12663             } else {
12664                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12665                     goto efault;
12666                 }
12667             }
12668         }
12669         break;
12670     }
12671 #endif
12672 
12673 #ifdef TARGET_NR_timer_settime
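    /* timer_settime: get_timer_id() validates the guest handle and recovers
     * the g_posix_timers index; the new and old itimerspec values are
     * converted between guest and host formats.
     */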
12674     case TARGET_NR_timer_settime:
12675     {
12676         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12677          * struct itimerspec * old_value */
12678         target_timer_t timerid = get_timer_id(arg1);
12679 
12680         if (timerid < 0) {
12681             ret = timerid;
12682         } else if (arg3 == 0) {
12683             ret = -TARGET_EINVAL;
12684         } else {
12685             timer_t htimer = g_posix_timers[timerid];
12686             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12687 
12688             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12689                 goto efault;
12690             }
12691             ret = get_errno(
12692                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12693             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12694                 goto efault;
12695             }
12696         }
12697         break;
12698     }
12699 #endif
12700 
12701 #ifdef TARGET_NR_timer_gettime
12702     case TARGET_NR_timer_gettime:
12703     {
12704         /* args: timer_t timerid, struct itimerspec *curr_value */
12705         target_timer_t timerid = get_timer_id(arg1);
12706 
12707         if (timerid < 0) {
12708             ret = timerid;
12709         } else if (!arg2) {
12710             ret = -TARGET_EFAULT;
12711         } else {
12712             timer_t htimer = g_posix_timers[timerid];
12713             struct itimerspec hspec;
12714             ret = get_errno(timer_gettime(htimer, &hspec));
12715 
12716             if (host_to_target_itimerspec(arg2, &hspec)) {
12717                 ret = -TARGET_EFAULT;
12718             }
12719         }
12720         break;
12721     }
12722 #endif
12723 
12724 #ifdef TARGET_NR_timer_getoverrun
12725     case TARGET_NR_timer_getoverrun:
12726     {
12727         /* args: timer_t timerid */
12728         target_timer_t timerid = get_timer_id(arg1);
12729 
12730         if (timerid < 0) {
12731             ret = timerid;
12732         } else {
12733             timer_t htimer = g_posix_timers[timerid];
12734             ret = get_errno(timer_getoverrun(htimer));
12735         }
12736         fd_trans_unregister(ret);
12737         break;
12738     }
12739 #endif
12740 
12741 #ifdef TARGET_NR_timer_delete
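    /* timer_delete releases the host timer and clears its g_posix_timers
     * slot so the id can be handed out again by a later timer_create.
     */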
12742     case TARGET_NR_timer_delete:
12743     {
12744         /* args: timer_t timerid */
12745         target_timer_t timerid = get_timer_id(arg1);
12746 
12747         if (timerid < 0) {
12748             ret = timerid;
12749         } else {
12750             timer_t htimer = g_posix_timers[timerid];
12751             ret = get_errno(timer_delete(htimer));
12752             g_posix_timers[timerid] = 0;
12753         }
12754         break;
12755     }
12756 #endif
12757 
12758 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12759     case TARGET_NR_timerfd_create:
12760         ret = get_errno(timerfd_create(arg1,
12761                 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12762         break;
12763 #endif
12764 
12765 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12766     case TARGET_NR_timerfd_gettime:
12767         {
12768             struct itimerspec its_curr;
12769 
12770             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12771 
12772             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12773                 goto efault;
12774             }
12775         }
12776         break;
12777 #endif
12778 
12779 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
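    /* timerfd_settime: convert the new itimerspec from the guest layout when
     * supplied, and convert the previous setting back if the caller asked
     * for it.
     */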
12780     case TARGET_NR_timerfd_settime:
12781         {
12782             struct itimerspec its_new, its_old, *p_new;
12783 
12784             if (arg3) {
12785                 if (target_to_host_itimerspec(&its_new, arg3)) {
12786                     goto efault;
12787                 }
12788                 p_new = &its_new;
12789             } else {
12790                 p_new = NULL;
12791             }
12792 
12793             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12794 
12795             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12796                 goto efault;
12797             }
12798         }
12799         break;
12800 #endif
12801 
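    /* The next few syscalls (ioprio_get/set, setns, unshare, kcmp) take only
     * scalar arguments, so their values are passed to the host unchanged.
     */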
12802 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12803     case TARGET_NR_ioprio_get:
12804         ret = get_errno(ioprio_get(arg1, arg2));
12805         break;
12806 #endif
12807 
12808 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12809     case TARGET_NR_ioprio_set:
12810         ret = get_errno(ioprio_set(arg1, arg2, arg3));
12811         break;
12812 #endif
12813 
12814 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12815     case TARGET_NR_setns:
12816         ret = get_errno(setns(arg1, arg2));
12817         break;
12818 #endif
12819 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12820     case TARGET_NR_unshare:
12821         ret = get_errno(unshare(arg1));
12822         break;
12823 #endif
12824 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12825     case TARGET_NR_kcmp:
12826         ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12827         break;
12828 #endif
12829 #ifdef TARGET_NR_swapcontext
12830     case TARGET_NR_swapcontext:
12831         /* PowerPC specific.  */
12832         ret = do_swapcontext(cpu_env, arg1, arg2, arg3);
12833         break;
12834 #endif
12835 
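    /* Anything not handled above is logged as unimplemented and fails with
     * ENOSYS; a few cases elsewhere jump to unimplemented_nowarn to return
     * ENOSYS without the log message.
     */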
12836     default:
12837     unimplemented:
12838         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12839 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12840     unimplemented_nowarn:
12841 #endif
12842         ret = -TARGET_ENOSYS;
12843         break;
12844     }
12845 fail:
12846 #ifdef DEBUG
12847     gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12848 #endif
12849     if (do_strace)
12850         print_syscall_ret(num, ret);
12851     trace_guest_user_syscall_ret(cpu, num, ret);
12852     return ret;
12853 efault:
12854     ret = -TARGET_EFAULT;
12855     goto fail;
12856 }
12857