xref: /openbmc/qemu/linux-user/syscall.c (revision 10f45d98)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
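/* For illustration (not used directly here): a guest glibc pthread_create()
 * typically issues clone() with CLONE_VM | CLONE_FS | CLONE_FILES |
 * CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM plus CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, i.e. CLONE_THREAD_FLAGS plus
 * bits from CLONE_OPTIONAL_THREAD_FLAGS, while a plain fork()-style clone()
 * passes little more than an exit signal covered by CSIGNAL.
 */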
165 
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167  * once. This exercises the codepaths for restart.
168  */
169 //#define DEBUG_ERESTARTSYS
170 
171 //#include <linux/msdos_fs.h>
172 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
173 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
174 
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
182 
183 #define _syscall0(type,name)		\
184 static type name (void)			\
185 {					\
186 	return syscall(__NR_##name);	\
187 }
188 
189 #define _syscall1(type,name,type1,arg1)		\
190 static type name (type1 arg1)			\
191 {						\
192 	return syscall(__NR_##name, arg1);	\
193 }
194 
195 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
196 static type name (type1 arg1,type2 arg2)		\
197 {							\
198 	return syscall(__NR_##name, arg1, arg2);	\
199 }
200 
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
202 static type name (type1 arg1,type2 arg2,type3 arg3)		\
203 {								\
204 	return syscall(__NR_##name, arg1, arg2, arg3);		\
205 }
206 
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
209 {										\
210 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
211 }
212 
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
214 		  type5,arg5)							\
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
216 {										\
217 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
218 }
219 
220 
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
222 		  type5,arg5,type6,arg6)					\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
224                   type6 arg6)							\
225 {										\
226 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
227 }
228 
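/* As an example of the expansion: _syscall1(int, exit_group, int, error_code)
 * (used below) defines "static int exit_group(int error_code)" which calls
 * syscall(__NR_exit_group, error_code) directly, bypassing any glibc wrapper.
 */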
229 
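/* The sys_-prefixed aliases below let the _syscallN macros emit direct
 * syscall wrappers without colliding with the glibc declarations of the
 * unprefixed names.
 */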
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
246 
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
251 
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256    errno. */
257 static int gettid(void) {
258     return -ENOSYS;
259 }
260 #endif
261 
262 /* For the 64-bit guest on 32-bit host case we must emulate
263  * getdents using getdents64, because otherwise the host
264  * might hand us back more dirent records than we can fit
265  * into the guest buffer after structure format conversion.
266  * Otherwise we implement the guest getdents using the host getdents, where available.
267  */
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
271 
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
282           loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286           siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296           const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300           unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303           unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307           void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309           struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311           struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
321 
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324           unsigned long, idx1, unsigned long, idx2)
325 #endif
326 
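/* Translation table for open()/fcntl() flag bits.  Each row is
 * { target_mask, target_bits, host_mask, host_bits }: when the masked target
 * flags equal target_bits the host_bits are set, and vice versa for the
 * reverse direction (see the generic bitmask translation helpers used below).
 */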
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
329   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
330   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
331   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
332   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
333   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
334   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
335   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
336   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
337   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
338   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
339   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
341 #if defined(O_DIRECT)
342   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
343 #endif
344 #if defined(O_NOATIME)
345   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
346 #endif
347 #if defined(O_CLOEXEC)
348   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
349 #endif
350 #if defined(O_PATH)
351   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
352 #endif
353 #if defined(O_TMPFILE)
354   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
355 #endif
356   /* Don't terminate the list prematurely on 64-bit host+guest.  */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360   { 0, 0, 0, 0 }
361 };
362 
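/* Local copies of the kernel's IFLA_* / RTA_* netlink attribute numbering.
 * QEMU defines these itself so that rtnetlink translation does not depend on
 * how recent the host's kernel headers are.
 */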
363 enum {
364     QEMU_IFLA_BR_UNSPEC,
365     QEMU_IFLA_BR_FORWARD_DELAY,
366     QEMU_IFLA_BR_HELLO_TIME,
367     QEMU_IFLA_BR_MAX_AGE,
368     QEMU_IFLA_BR_AGEING_TIME,
369     QEMU_IFLA_BR_STP_STATE,
370     QEMU_IFLA_BR_PRIORITY,
371     QEMU_IFLA_BR_VLAN_FILTERING,
372     QEMU_IFLA_BR_VLAN_PROTOCOL,
373     QEMU_IFLA_BR_GROUP_FWD_MASK,
374     QEMU_IFLA_BR_ROOT_ID,
375     QEMU_IFLA_BR_BRIDGE_ID,
376     QEMU_IFLA_BR_ROOT_PORT,
377     QEMU_IFLA_BR_ROOT_PATH_COST,
378     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
380     QEMU_IFLA_BR_HELLO_TIMER,
381     QEMU_IFLA_BR_TCN_TIMER,
382     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
383     QEMU_IFLA_BR_GC_TIMER,
384     QEMU_IFLA_BR_GROUP_ADDR,
385     QEMU_IFLA_BR_FDB_FLUSH,
386     QEMU_IFLA_BR_MCAST_ROUTER,
387     QEMU_IFLA_BR_MCAST_SNOOPING,
388     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
389     QEMU_IFLA_BR_MCAST_QUERIER,
390     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
391     QEMU_IFLA_BR_MCAST_HASH_MAX,
392     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
393     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
394     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
395     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
396     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
399     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
400     QEMU_IFLA_BR_NF_CALL_IPTABLES,
401     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
402     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
403     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
404     QEMU_IFLA_BR_PAD,
405     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
406     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
408     QEMU_IFLA_BR_MCAST_MLD_VERSION,
409     QEMU___IFLA_BR_MAX,
410 };
411 
412 enum {
413     QEMU_IFLA_UNSPEC,
414     QEMU_IFLA_ADDRESS,
415     QEMU_IFLA_BROADCAST,
416     QEMU_IFLA_IFNAME,
417     QEMU_IFLA_MTU,
418     QEMU_IFLA_LINK,
419     QEMU_IFLA_QDISC,
420     QEMU_IFLA_STATS,
421     QEMU_IFLA_COST,
422     QEMU_IFLA_PRIORITY,
423     QEMU_IFLA_MASTER,
424     QEMU_IFLA_WIRELESS,
425     QEMU_IFLA_PROTINFO,
426     QEMU_IFLA_TXQLEN,
427     QEMU_IFLA_MAP,
428     QEMU_IFLA_WEIGHT,
429     QEMU_IFLA_OPERSTATE,
430     QEMU_IFLA_LINKMODE,
431     QEMU_IFLA_LINKINFO,
432     QEMU_IFLA_NET_NS_PID,
433     QEMU_IFLA_IFALIAS,
434     QEMU_IFLA_NUM_VF,
435     QEMU_IFLA_VFINFO_LIST,
436     QEMU_IFLA_STATS64,
437     QEMU_IFLA_VF_PORTS,
438     QEMU_IFLA_PORT_SELF,
439     QEMU_IFLA_AF_SPEC,
440     QEMU_IFLA_GROUP,
441     QEMU_IFLA_NET_NS_FD,
442     QEMU_IFLA_EXT_MASK,
443     QEMU_IFLA_PROMISCUITY,
444     QEMU_IFLA_NUM_TX_QUEUES,
445     QEMU_IFLA_NUM_RX_QUEUES,
446     QEMU_IFLA_CARRIER,
447     QEMU_IFLA_PHYS_PORT_ID,
448     QEMU_IFLA_CARRIER_CHANGES,
449     QEMU_IFLA_PHYS_SWITCH_ID,
450     QEMU_IFLA_LINK_NETNSID,
451     QEMU_IFLA_PHYS_PORT_NAME,
452     QEMU_IFLA_PROTO_DOWN,
453     QEMU_IFLA_GSO_MAX_SEGS,
454     QEMU_IFLA_GSO_MAX_SIZE,
455     QEMU_IFLA_PAD,
456     QEMU_IFLA_XDP,
457     QEMU_IFLA_EVENT,
458     QEMU_IFLA_NEW_NETNSID,
459     QEMU_IFLA_IF_NETNSID,
460     QEMU_IFLA_CARRIER_UP_COUNT,
461     QEMU_IFLA_CARRIER_DOWN_COUNT,
462     QEMU_IFLA_NEW_IFINDEX,
463     QEMU___IFLA_MAX
464 };
465 
466 enum {
467     QEMU_IFLA_BRPORT_UNSPEC,
468     QEMU_IFLA_BRPORT_STATE,
469     QEMU_IFLA_BRPORT_PRIORITY,
470     QEMU_IFLA_BRPORT_COST,
471     QEMU_IFLA_BRPORT_MODE,
472     QEMU_IFLA_BRPORT_GUARD,
473     QEMU_IFLA_BRPORT_PROTECT,
474     QEMU_IFLA_BRPORT_FAST_LEAVE,
475     QEMU_IFLA_BRPORT_LEARNING,
476     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
477     QEMU_IFLA_BRPORT_PROXYARP,
478     QEMU_IFLA_BRPORT_LEARNING_SYNC,
479     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
480     QEMU_IFLA_BRPORT_ROOT_ID,
481     QEMU_IFLA_BRPORT_BRIDGE_ID,
482     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
483     QEMU_IFLA_BRPORT_DESIGNATED_COST,
484     QEMU_IFLA_BRPORT_ID,
485     QEMU_IFLA_BRPORT_NO,
486     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
487     QEMU_IFLA_BRPORT_CONFIG_PENDING,
488     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
489     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
490     QEMU_IFLA_BRPORT_HOLD_TIMER,
491     QEMU_IFLA_BRPORT_FLUSH,
492     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
493     QEMU_IFLA_BRPORT_PAD,
494     QEMU_IFLA_BRPORT_MCAST_FLOOD,
495     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
496     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
497     QEMU_IFLA_BRPORT_BCAST_FLOOD,
498     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
499     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
500     QEMU___IFLA_BRPORT_MAX
501 };
502 
503 enum {
504     QEMU_IFLA_TUN_UNSPEC,
505     QEMU_IFLA_TUN_OWNER,
506     QEMU_IFLA_TUN_GROUP,
507     QEMU_IFLA_TUN_TYPE,
508     QEMU_IFLA_TUN_PI,
509     QEMU_IFLA_TUN_VNET_HDR,
510     QEMU_IFLA_TUN_PERSIST,
511     QEMU_IFLA_TUN_MULTI_QUEUE,
512     QEMU_IFLA_TUN_NUM_QUEUES,
513     QEMU_IFLA_TUN_NUM_DISABLED_QUEUES,
514     QEMU___IFLA_TUN_MAX,
515 };
516 
517 enum {
518     QEMU_IFLA_INFO_UNSPEC,
519     QEMU_IFLA_INFO_KIND,
520     QEMU_IFLA_INFO_DATA,
521     QEMU_IFLA_INFO_XSTATS,
522     QEMU_IFLA_INFO_SLAVE_KIND,
523     QEMU_IFLA_INFO_SLAVE_DATA,
524     QEMU___IFLA_INFO_MAX,
525 };
526 
527 enum {
528     QEMU_IFLA_INET_UNSPEC,
529     QEMU_IFLA_INET_CONF,
530     QEMU___IFLA_INET_MAX,
531 };
532 
533 enum {
534     QEMU_IFLA_INET6_UNSPEC,
535     QEMU_IFLA_INET6_FLAGS,
536     QEMU_IFLA_INET6_CONF,
537     QEMU_IFLA_INET6_STATS,
538     QEMU_IFLA_INET6_MCAST,
539     QEMU_IFLA_INET6_CACHEINFO,
540     QEMU_IFLA_INET6_ICMP6STATS,
541     QEMU_IFLA_INET6_TOKEN,
542     QEMU_IFLA_INET6_ADDR_GEN_MODE,
543     QEMU___IFLA_INET6_MAX
544 };
545 
546 enum {
547     QEMU_IFLA_XDP_UNSPEC,
548     QEMU_IFLA_XDP_FD,
549     QEMU_IFLA_XDP_ATTACHED,
550     QEMU_IFLA_XDP_FLAGS,
551     QEMU_IFLA_XDP_PROG_ID,
552     QEMU___IFLA_XDP_MAX,
553 };
554 
555 enum {
556     QEMU_RTA_UNSPEC,
557     QEMU_RTA_DST,
558     QEMU_RTA_SRC,
559     QEMU_RTA_IIF,
560     QEMU_RTA_OIF,
561     QEMU_RTA_GATEWAY,
562     QEMU_RTA_PRIORITY,
563     QEMU_RTA_PREFSRC,
564     QEMU_RTA_METRICS,
565     QEMU_RTA_MULTIPATH,
566     QEMU_RTA_PROTOINFO, /* no longer used */
567     QEMU_RTA_FLOW,
568     QEMU_RTA_CACHEINFO,
569     QEMU_RTA_SESSION, /* no longer used */
570     QEMU_RTA_MP_ALGO, /* no longer used */
571     QEMU_RTA_TABLE,
572     QEMU_RTA_MARK,
573     QEMU_RTA_MFC_STATS,
574     QEMU_RTA_VIA,
575     QEMU_RTA_NEWDST,
576     QEMU_RTA_PREF,
577     QEMU_RTA_ENCAP_TYPE,
578     QEMU_RTA_ENCAP,
579     QEMU_RTA_EXPIRES,
580     QEMU_RTA_PAD,
581     QEMU_RTA_UID,
582     QEMU_RTA_TTL_PROPAGATE,
583     QEMU_RTA_IP_PROTO,
584     QEMU_RTA_SPORT,
585     QEMU_RTA_DPORT,
586     QEMU___RTA_MAX
587 };
588 
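/* Per-fd translation hooks: some emulated fds (e.g. netlink sockets,
 * signalfd, eventfd) need their payload and/or sockaddr data converted
 * between host and target layouts.  target_fd_trans[] maps an fd to its
 * converters and is grown on demand in slices of 64 entries.
 */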
589 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
590 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
591 typedef struct TargetFdTrans {
592     TargetFdDataFunc host_to_target_data;
593     TargetFdDataFunc target_to_host_data;
594     TargetFdAddrFunc target_to_host_addr;
595 } TargetFdTrans;
596 
597 static TargetFdTrans **target_fd_trans;
598 
599 static unsigned int target_fd_max;
600 
601 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
602 {
603     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
604         return target_fd_trans[fd]->target_to_host_data;
605     }
606     return NULL;
607 }
608 
609 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
610 {
611     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
612         return target_fd_trans[fd]->host_to_target_data;
613     }
614     return NULL;
615 }
616 
617 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
618 {
619     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
620         return target_fd_trans[fd]->target_to_host_addr;
621     }
622     return NULL;
623 }
624 
625 static void fd_trans_register(int fd, TargetFdTrans *trans)
626 {
627     unsigned int oldmax;
628 
629     if (fd >= target_fd_max) {
630         oldmax = target_fd_max;
631         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
632         target_fd_trans = g_renew(TargetFdTrans *,
633                                   target_fd_trans, target_fd_max);
634         memset((void *)(target_fd_trans + oldmax), 0,
635                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
636     }
637     target_fd_trans[fd] = trans;
638 }
639 
640 static void fd_trans_unregister(int fd)
641 {
642     if (fd >= 0 && fd < target_fd_max) {
643         target_fd_trans[fd] = NULL;
644     }
645 }
646 
647 static void fd_trans_dup(int oldfd, int newfd)
648 {
649     fd_trans_unregister(newfd);
650     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
651         fd_trans_register(newfd, target_fd_trans[oldfd]);
652     }
653 }
654 
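/* Emulates the getcwd syscall's return convention (length of the string
 * including the trailing NUL, or -1 with errno set) on top of the libc
 * getcwd() wrapper.
 */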
655 static int sys_getcwd1(char *buf, size_t size)
656 {
657   if (getcwd(buf, size) == NULL) {
658       /* getcwd() sets errno */
659       return (-1);
660   }
661   return strlen(buf)+1;
662 }
663 
664 #ifdef TARGET_NR_utimensat
665 #if defined(__NR_utimensat)
666 #define __NR_sys_utimensat __NR_utimensat
667 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
668           const struct timespec *,tsp,int,flags)
669 #else
670 static int sys_utimensat(int dirfd, const char *pathname,
671                          const struct timespec times[2], int flags)
672 {
673     errno = ENOSYS;
674     return -1;
675 }
676 #endif
677 #endif /* TARGET_NR_utimensat */
678 
679 #ifdef TARGET_NR_renameat2
680 #if defined(__NR_renameat2)
681 #define __NR_sys_renameat2 __NR_renameat2
682 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
683           const char *, new, unsigned int, flags)
684 #else
685 static int sys_renameat2(int oldfd, const char *old,
686                          int newfd, const char *new, int flags)
687 {
688     if (flags == 0) {
689         return renameat(oldfd, old, newfd, new);
690     }
691     errno = ENOSYS;
692     return -1;
693 }
694 #endif
695 #endif /* TARGET_NR_renameat2 */
696 
697 #ifdef CONFIG_INOTIFY
698 #include <sys/inotify.h>
699 
700 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
701 static int sys_inotify_init(void)
702 {
703   return (inotify_init());
704 }
705 #endif
706 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
707 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
708 {
709   return (inotify_add_watch(fd, pathname, mask));
710 }
711 #endif
712 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
713 static int sys_inotify_rm_watch(int fd, int32_t wd)
714 {
715   return (inotify_rm_watch(fd, wd));
716 }
717 #endif
718 #ifdef CONFIG_INOTIFY1
719 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
720 static int sys_inotify_init1(int flags)
721 {
722   return (inotify_init1(flags));
723 }
724 #endif
725 #endif
726 #else
727 /* Userspace can usually survive runtime without inotify */
728 #undef TARGET_NR_inotify_init
729 #undef TARGET_NR_inotify_init1
730 #undef TARGET_NR_inotify_add_watch
731 #undef TARGET_NR_inotify_rm_watch
732 #endif /* CONFIG_INOTIFY  */
733 
734 #if defined(TARGET_NR_prlimit64)
735 #ifndef __NR_prlimit64
736 # define __NR_prlimit64 -1
737 #endif
738 #define __NR_sys_prlimit64 __NR_prlimit64
739 /* The glibc rlimit structure may not match the one used by the underlying syscall */
740 struct host_rlimit64 {
741     uint64_t rlim_cur;
742     uint64_t rlim_max;
743 };
744 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
745           const struct host_rlimit64 *, new_limit,
746           struct host_rlimit64 *, old_limit)
747 #endif
748 
749 
750 #if defined(TARGET_NR_timer_create)
751 /* Maximum of 32 active POSIX timers allowed at any one time. */
752 static timer_t g_posix_timers[32] = { 0, } ;
753 
754 static inline int next_free_host_timer(void)
755 {
756     int k ;
757     /* FIXME: Does finding the next free slot require a lock? */
758     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
759         if (g_posix_timers[k] == 0) {
760             g_posix_timers[k] = (timer_t) 1;
761             return k;
762         }
763     }
764     return -1;
765 }
766 #endif
767 
768 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
769 #ifdef TARGET_ARM
770 static inline int regpairs_aligned(void *cpu_env, int num)
771 {
772     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
773 }
774 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
775 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
776 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
777 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
778  * of registers which translates to the same as ARM/MIPS, because we start with
779  * r3 as arg1 */
780 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
781 #elif defined(TARGET_SH4)
782 /* SH4 doesn't align register pairs, except for p{read,write}64 */
783 static inline int regpairs_aligned(void *cpu_env, int num)
784 {
785     switch (num) {
786     case TARGET_NR_pread64:
787     case TARGET_NR_pwrite64:
788         return 1;
789 
790     default:
791         return 0;
792     }
793 }
794 #elif defined(TARGET_XTENSA)
795 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
796 #else
797 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
798 #endif
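/* When regpairs_aligned() returns 1, a 64-bit syscall argument (such as the
 * offset of pread64/pwrite64) is passed in an even/odd register pair, so a
 * padding slot precedes its two 32-bit halves; the individual syscall
 * handlers below use this to pick the right argument registers.
 */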
799 
800 #define ERRNO_TABLE_SIZE 1200
801 
802 /* target_to_host_errno_table[] is initialized from
803  * host_to_target_errno_table[] in syscall_init(). */
804 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
805 };
806 
807 /*
808  * This list is the union of errno values overridden in asm-<arch>/errno.h
809  * minus the errnos that are not actually generic to all archs.
810  */
811 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
812     [EAGAIN]		= TARGET_EAGAIN,
813     [EIDRM]		= TARGET_EIDRM,
814     [ECHRNG]		= TARGET_ECHRNG,
815     [EL2NSYNC]		= TARGET_EL2NSYNC,
816     [EL3HLT]		= TARGET_EL3HLT,
817     [EL3RST]		= TARGET_EL3RST,
818     [ELNRNG]		= TARGET_ELNRNG,
819     [EUNATCH]		= TARGET_EUNATCH,
820     [ENOCSI]		= TARGET_ENOCSI,
821     [EL2HLT]		= TARGET_EL2HLT,
822     [EDEADLK]		= TARGET_EDEADLK,
823     [ENOLCK]		= TARGET_ENOLCK,
824     [EBADE]		= TARGET_EBADE,
825     [EBADR]		= TARGET_EBADR,
826     [EXFULL]		= TARGET_EXFULL,
827     [ENOANO]		= TARGET_ENOANO,
828     [EBADRQC]		= TARGET_EBADRQC,
829     [EBADSLT]		= TARGET_EBADSLT,
830     [EBFONT]		= TARGET_EBFONT,
831     [ENOSTR]		= TARGET_ENOSTR,
832     [ENODATA]		= TARGET_ENODATA,
833     [ETIME]		= TARGET_ETIME,
834     [ENOSR]		= TARGET_ENOSR,
835     [ENONET]		= TARGET_ENONET,
836     [ENOPKG]		= TARGET_ENOPKG,
837     [EREMOTE]		= TARGET_EREMOTE,
838     [ENOLINK]		= TARGET_ENOLINK,
839     [EADV]		= TARGET_EADV,
840     [ESRMNT]		= TARGET_ESRMNT,
841     [ECOMM]		= TARGET_ECOMM,
842     [EPROTO]		= TARGET_EPROTO,
843     [EDOTDOT]		= TARGET_EDOTDOT,
844     [EMULTIHOP]		= TARGET_EMULTIHOP,
845     [EBADMSG]		= TARGET_EBADMSG,
846     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
847     [EOVERFLOW]		= TARGET_EOVERFLOW,
848     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
849     [EBADFD]		= TARGET_EBADFD,
850     [EREMCHG]		= TARGET_EREMCHG,
851     [ELIBACC]		= TARGET_ELIBACC,
852     [ELIBBAD]		= TARGET_ELIBBAD,
853     [ELIBSCN]		= TARGET_ELIBSCN,
854     [ELIBMAX]		= TARGET_ELIBMAX,
855     [ELIBEXEC]		= TARGET_ELIBEXEC,
856     [EILSEQ]		= TARGET_EILSEQ,
857     [ENOSYS]		= TARGET_ENOSYS,
858     [ELOOP]		= TARGET_ELOOP,
859     [ERESTART]		= TARGET_ERESTART,
860     [ESTRPIPE]		= TARGET_ESTRPIPE,
861     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
862     [EUSERS]		= TARGET_EUSERS,
863     [ENOTSOCK]		= TARGET_ENOTSOCK,
864     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
865     [EMSGSIZE]		= TARGET_EMSGSIZE,
866     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
867     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
868     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
869     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
870     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
871     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
872     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
873     [EADDRINUSE]	= TARGET_EADDRINUSE,
874     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
875     [ENETDOWN]		= TARGET_ENETDOWN,
876     [ENETUNREACH]	= TARGET_ENETUNREACH,
877     [ENETRESET]		= TARGET_ENETRESET,
878     [ECONNABORTED]	= TARGET_ECONNABORTED,
879     [ECONNRESET]	= TARGET_ECONNRESET,
880     [ENOBUFS]		= TARGET_ENOBUFS,
881     [EISCONN]		= TARGET_EISCONN,
882     [ENOTCONN]		= TARGET_ENOTCONN,
883     [EUCLEAN]		= TARGET_EUCLEAN,
884     [ENOTNAM]		= TARGET_ENOTNAM,
885     [ENAVAIL]		= TARGET_ENAVAIL,
886     [EISNAM]		= TARGET_EISNAM,
887     [EREMOTEIO]		= TARGET_EREMOTEIO,
888     [EDQUOT]            = TARGET_EDQUOT,
889     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
890     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
891     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
892     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
893     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
894     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
895     [EALREADY]		= TARGET_EALREADY,
896     [EINPROGRESS]	= TARGET_EINPROGRESS,
897     [ESTALE]		= TARGET_ESTALE,
898     [ECANCELED]		= TARGET_ECANCELED,
899     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
900     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
901 #ifdef ENOKEY
902     [ENOKEY]		= TARGET_ENOKEY,
903 #endif
904 #ifdef EKEYEXPIRED
905     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
906 #endif
907 #ifdef EKEYREVOKED
908     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
909 #endif
910 #ifdef EKEYREJECTED
911     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
912 #endif
913 #ifdef EOWNERDEAD
914     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
915 #endif
916 #ifdef ENOTRECOVERABLE
917     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
918 #endif
919 #ifdef ENOMSG
920     [ENOMSG]            = TARGET_ENOMSG,
921 #endif
922 #ifdef ERFKILL
923     [ERFKILL]           = TARGET_ERFKILL,
924 #endif
925 #ifdef EHWPOISON
926     [EHWPOISON]         = TARGET_EHWPOISON,
927 #endif
928 };
929 
930 static inline int host_to_target_errno(int err)
931 {
932     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
933         host_to_target_errno_table[err]) {
934         return host_to_target_errno_table[err];
935     }
936     return err;
937 }
938 
939 static inline int target_to_host_errno(int err)
940 {
941     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
942         target_to_host_errno_table[err]) {
943         return target_to_host_errno_table[err];
944     }
945     return err;
946 }
947 
948 static inline abi_long get_errno(abi_long ret)
949 {
950     if (ret == -1)
951         return -host_to_target_errno(errno);
952     else
953         return ret;
954 }
955 
956 const char *target_strerror(int err)
957 {
958     if (err == TARGET_ERESTARTSYS) {
959         return "To be restarted";
960     }
961     if (err == TARGET_QEMU_ESIGRETURN) {
962         return "Successful exit from sigreturn";
963     }
964 
965     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
966         return NULL;
967     }
968     return strerror(target_to_host_errno(err));
969 }
970 
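/* Wrappers around safe_syscall(): these generate stubs for blocking syscalls
 * that cooperate with the signal handling code, so that a guest signal
 * arriving just before the host syscall blocks makes the call fail with
 * errno set to TARGET_ERESTARTSYS instead of being left pending; get_errno()
 * then propagates -TARGET_ERESTARTSYS so the syscall can be restarted.
 */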
971 #define safe_syscall0(type, name) \
972 static type safe_##name(void) \
973 { \
974     return safe_syscall(__NR_##name); \
975 }
976 
977 #define safe_syscall1(type, name, type1, arg1) \
978 static type safe_##name(type1 arg1) \
979 { \
980     return safe_syscall(__NR_##name, arg1); \
981 }
982 
983 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
984 static type safe_##name(type1 arg1, type2 arg2) \
985 { \
986     return safe_syscall(__NR_##name, arg1, arg2); \
987 }
988 
989 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
990 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
991 { \
992     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
993 }
994 
995 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
996     type4, arg4) \
997 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
998 { \
999     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
1000 }
1001 
1002 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
1003     type4, arg4, type5, arg5) \
1004 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1005     type5 arg5) \
1006 { \
1007     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
1008 }
1009 
1010 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
1011     type4, arg4, type5, arg5, type6, arg6) \
1012 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1013     type5 arg5, type6 arg6) \
1014 { \
1015     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
1016 }
1017 
1018 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
1019 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
1020 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
1021               int, flags, mode_t, mode)
1022 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
1023               struct rusage *, rusage)
1024 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
1025               int, options, struct rusage *, rusage)
1026 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
1027 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
1028               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
1029 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
1030               struct timespec *, tsp, const sigset_t *, sigmask,
1031               size_t, sigsetsize)
1032 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
1033               int, maxevents, int, timeout, const sigset_t *, sigmask,
1034               size_t, sigsetsize)
1035 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
1036               const struct timespec *,timeout,int *,uaddr2,int,val3)
1037 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
1038 safe_syscall2(int, kill, pid_t, pid, int, sig)
1039 safe_syscall2(int, tkill, int, tid, int, sig)
1040 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
1041 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
1042 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
1043 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
1044               unsigned long, pos_l, unsigned long, pos_h)
1045 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
1046               unsigned long, pos_l, unsigned long, pos_h)
1047 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1048               socklen_t, addrlen)
1049 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1050               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1051 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1052               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1053 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1054 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1055 safe_syscall2(int, flock, int, fd, int, operation)
1056 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1057               const struct timespec *, uts, size_t, sigsetsize)
1058 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1059               int, flags)
1060 safe_syscall2(int, nanosleep, const struct timespec *, req,
1061               struct timespec *, rem)
1062 #ifdef TARGET_NR_clock_nanosleep
1063 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1064               const struct timespec *, req, struct timespec *, rem)
1065 #endif
1066 #ifdef __NR_msgsnd
1067 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1068               int, flags)
1069 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1070               long, msgtype, int, flags)
1071 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1072               unsigned, nsops, const struct timespec *, timeout)
1073 #else
1074 /* This host kernel architecture uses a single ipc syscall; fake up
1075  * wrappers for the sub-operations to hide this implementation detail.
1076  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1077  * for the call parameter because some structs in there conflict with the
1078  * sys/ipc.h ones. So we just define them here, and rely on them being
1079  * the same for all host architectures.
1080  */
1081 #define Q_SEMTIMEDOP 4
1082 #define Q_MSGSND 11
1083 #define Q_MSGRCV 12
1084 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1085 
1086 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1087               void *, ptr, long, fifth)
1088 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1089 {
1090     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1091 }
1092 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1093 {
1094     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1095 }
1096 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1097                            const struct timespec *timeout)
1098 {
1099     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1100                     (long)timeout);
1101 }
1102 #endif
1103 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1104 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1105               size_t, len, unsigned, prio, const struct timespec *, timeout)
1106 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1107               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1108 #endif
1109 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1110  * "third argument might be integer or pointer or not present" behaviour of
1111  * the libc function.
1112  */
1113 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1114 /* Similarly for fcntl. Note that callers must always:
1115  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1116  *  use the flock64 struct rather than unsuffixed flock
1117  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1118  */
1119 #ifdef __NR_fcntl64
1120 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1121 #else
1122 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1123 #endif
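/* e.g. a lock request is issued as safe_fcntl(fd, F_SETLKW64, &fl) with
 * "struct flock64 fl", never with the unsuffixed command and struct flock.
 */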
1124 
1125 static inline int host_to_target_sock_type(int host_type)
1126 {
1127     int target_type;
1128 
1129     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1130     case SOCK_DGRAM:
1131         target_type = TARGET_SOCK_DGRAM;
1132         break;
1133     case SOCK_STREAM:
1134         target_type = TARGET_SOCK_STREAM;
1135         break;
1136     default:
1137         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1138         break;
1139     }
1140 
1141 #if defined(SOCK_CLOEXEC)
1142     if (host_type & SOCK_CLOEXEC) {
1143         target_type |= TARGET_SOCK_CLOEXEC;
1144     }
1145 #endif
1146 
1147 #if defined(SOCK_NONBLOCK)
1148     if (host_type & SOCK_NONBLOCK) {
1149         target_type |= TARGET_SOCK_NONBLOCK;
1150     }
1151 #endif
1152 
1153     return target_type;
1154 }
1155 
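/* Guest heap state for the brk() emulation below: target_original_brk is the
 * initial break set at load time, target_brk is the current break, and
 * brk_page is the host-page-aligned top of the mapping backing the heap.
 */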
1156 static abi_ulong target_brk;
1157 static abi_ulong target_original_brk;
1158 static abi_ulong brk_page;
1159 
1160 void target_set_brk(abi_ulong new_brk)
1161 {
1162     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1163     brk_page = HOST_PAGE_ALIGN(target_brk);
1164 }
1165 
1166 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1167 #define DEBUGF_BRK(message, args...)
1168 
1169 /* do_brk() must return target values and target errnos. */
1170 abi_long do_brk(abi_ulong new_brk)
1171 {
1172     abi_long mapped_addr;
1173     abi_ulong new_alloc_size;
1174 
1175     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1176 
1177     if (!new_brk) {
1178         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1179         return target_brk;
1180     }
1181     if (new_brk < target_original_brk) {
1182         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1183                    target_brk);
1184         return target_brk;
1185     }
1186 
1187     /* If the new brk is less than the highest page reserved to the
1188      * target heap allocation, set it and we're almost done...  */
1189     if (new_brk <= brk_page) {
1190         /* Heap contents are initialized to zero, as for anonymous
1191          * mapped pages.  */
1192         if (new_brk > target_brk) {
1193             memset(g2h(target_brk), 0, new_brk - target_brk);
1194         }
1195 	target_brk = new_brk;
1196         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1197     	return target_brk;
1198     }
1199 
1200     /* We need to allocate more memory after the brk... Note that
1201      * we don't use MAP_FIXED because that will map over the top of
1202      * any existing mapping (like the one with the host libc or qemu
1203      * itself); instead we treat "mapped but at wrong address" as
1204      * a failure and unmap again.
1205      */
1206     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1207     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1208                                         PROT_READ|PROT_WRITE,
1209                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1210 
1211     if (mapped_addr == brk_page) {
1212         /* Heap contents are initialized to zero, as for anonymous
1213          * mapped pages.  Technically the new pages are already
1214          * initialized to zero since they *are* anonymous mapped
1215          * pages, however we have to take care with the contents that
1216          * come from the remaining part of the previous page: it may
1217          * contain garbage data due to a previous heap usage (grown
1218          * then shrunk).  */
1219         memset(g2h(target_brk), 0, brk_page - target_brk);
1220 
1221         target_brk = new_brk;
1222         brk_page = HOST_PAGE_ALIGN(target_brk);
1223         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1224             target_brk);
1225         return target_brk;
1226     } else if (mapped_addr != -1) {
1227         /* Mapped but at wrong address, meaning there wasn't actually
1228          * enough space for this brk.
1229          */
1230         target_munmap(mapped_addr, new_alloc_size);
1231         mapped_addr = -1;
1232         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1233     }
1234     else {
1235         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1236     }
1237 
1238 #if defined(TARGET_ALPHA)
1239     /* We (partially) emulate OSF/1 on Alpha, which requires we
1240        return a proper errno, not an unchanged brk value.  */
1241     return -TARGET_ENOMEM;
1242 #endif
1243     /* For everything else, return the previous break. */
1244     return target_brk;
1245 }
1246 
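/* select() fd_set conversion: the guest passes its bitmaps as arrays of
 * abi_ulong, so rebuild a host fd_set bit by bit (and convert back after the
 * call) rather than assuming the two in-memory layouts match.
 */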
1247 static inline abi_long copy_from_user_fdset(fd_set *fds,
1248                                             abi_ulong target_fds_addr,
1249                                             int n)
1250 {
1251     int i, nw, j, k;
1252     abi_ulong b, *target_fds;
1253 
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_READ,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  1)))
1259         return -TARGET_EFAULT;
1260 
1261     FD_ZERO(fds);
1262     k = 0;
1263     for (i = 0; i < nw; i++) {
1264         /* grab the abi_ulong */
1265         __get_user(b, &target_fds[i]);
1266         for (j = 0; j < TARGET_ABI_BITS; j++) {
1267             /* check the bit inside the abi_ulong */
1268             if ((b >> j) & 1)
1269                 FD_SET(k, fds);
1270             k++;
1271         }
1272     }
1273 
1274     unlock_user(target_fds, target_fds_addr, 0);
1275 
1276     return 0;
1277 }
1278 
1279 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1280                                                  abi_ulong target_fds_addr,
1281                                                  int n)
1282 {
1283     if (target_fds_addr) {
1284         if (copy_from_user_fdset(fds, target_fds_addr, n))
1285             return -TARGET_EFAULT;
1286         *fds_ptr = fds;
1287     } else {
1288         *fds_ptr = NULL;
1289     }
1290     return 0;
1291 }
1292 
1293 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1294                                           const fd_set *fds,
1295                                           int n)
1296 {
1297     int i, nw, j, k;
1298     abi_long v;
1299     abi_ulong *target_fds;
1300 
1301     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1302     if (!(target_fds = lock_user(VERIFY_WRITE,
1303                                  target_fds_addr,
1304                                  sizeof(abi_ulong) * nw,
1305                                  0)))
1306         return -TARGET_EFAULT;
1307 
1308     k = 0;
1309     for (i = 0; i < nw; i++) {
1310         v = 0;
1311         for (j = 0; j < TARGET_ABI_BITS; j++) {
1312             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1313             k++;
1314         }
1315         __put_user(v, &target_fds[i]);
1316     }
1317 
1318     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1319 
1320     return 0;
1321 }
1322 
1323 #if defined(__alpha__)
1324 #define HOST_HZ 1024
1325 #else
1326 #define HOST_HZ 100
1327 #endif
1328 
1329 static inline abi_long host_to_target_clock_t(long ticks)
1330 {
1331 #if HOST_HZ == TARGET_HZ
1332     return ticks;
1333 #else
1334     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1335 #endif
1336 }
1337 
1338 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1339                                              const struct rusage *rusage)
1340 {
1341     struct target_rusage *target_rusage;
1342 
1343     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1344         return -TARGET_EFAULT;
1345     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1346     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1347     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1348     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1349     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1350     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1351     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1352     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1353     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1354     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1355     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1356     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1357     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1358     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1359     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1360     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1361     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1362     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1363     unlock_user_struct(target_rusage, target_addr, 1);
1364 
1365     return 0;
1366 }
1367 
1368 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1369 {
1370     abi_ulong target_rlim_swap;
1371     rlim_t result;
1372 
1373     target_rlim_swap = tswapal(target_rlim);
1374     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1375         return RLIM_INFINITY;
1376 
1377     result = target_rlim_swap;
1378     if (target_rlim_swap != (rlim_t)result)
1379         return RLIM_INFINITY;
1380 
1381     return result;
1382 }
1383 
1384 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1385 {
1386     abi_ulong target_rlim_swap;
1387     abi_ulong result;
1388 
1389     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1390         target_rlim_swap = TARGET_RLIM_INFINITY;
1391     else
1392         target_rlim_swap = rlim;
1393     result = tswapal(target_rlim_swap);
1394 
1395     return result;
1396 }
1397 
1398 static inline int target_to_host_resource(int code)
1399 {
1400     switch (code) {
1401     case TARGET_RLIMIT_AS:
1402         return RLIMIT_AS;
1403     case TARGET_RLIMIT_CORE:
1404         return RLIMIT_CORE;
1405     case TARGET_RLIMIT_CPU:
1406         return RLIMIT_CPU;
1407     case TARGET_RLIMIT_DATA:
1408         return RLIMIT_DATA;
1409     case TARGET_RLIMIT_FSIZE:
1410         return RLIMIT_FSIZE;
1411     case TARGET_RLIMIT_LOCKS:
1412         return RLIMIT_LOCKS;
1413     case TARGET_RLIMIT_MEMLOCK:
1414         return RLIMIT_MEMLOCK;
1415     case TARGET_RLIMIT_MSGQUEUE:
1416         return RLIMIT_MSGQUEUE;
1417     case TARGET_RLIMIT_NICE:
1418         return RLIMIT_NICE;
1419     case TARGET_RLIMIT_NOFILE:
1420         return RLIMIT_NOFILE;
1421     case TARGET_RLIMIT_NPROC:
1422         return RLIMIT_NPROC;
1423     case TARGET_RLIMIT_RSS:
1424         return RLIMIT_RSS;
1425     case TARGET_RLIMIT_RTPRIO:
1426         return RLIMIT_RTPRIO;
1427     case TARGET_RLIMIT_SIGPENDING:
1428         return RLIMIT_SIGPENDING;
1429     case TARGET_RLIMIT_STACK:
1430         return RLIMIT_STACK;
1431     default:
1432         return code;
1433     }
1434 }
1435 
1436 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1437                                               abi_ulong target_tv_addr)
1438 {
1439     struct target_timeval *target_tv;
1440 
1441     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1442         return -TARGET_EFAULT;
1443 
1444     __get_user(tv->tv_sec, &target_tv->tv_sec);
1445     __get_user(tv->tv_usec, &target_tv->tv_usec);
1446 
1447     unlock_user_struct(target_tv, target_tv_addr, 0);
1448 
1449     return 0;
1450 }
1451 
1452 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1453                                             const struct timeval *tv)
1454 {
1455     struct target_timeval *target_tv;
1456 
1457     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1458         return -TARGET_EFAULT;
1459 
1460     __put_user(tv->tv_sec, &target_tv->tv_sec);
1461     __put_user(tv->tv_usec, &target_tv->tv_usec);
1462 
1463     unlock_user_struct(target_tv, target_tv_addr, 1);
1464 
1465     return 0;
1466 }
1467 
1468 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1469                                                abi_ulong target_tz_addr)
1470 {
1471     struct target_timezone *target_tz;
1472 
1473     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1474         return -TARGET_EFAULT;
1475     }
1476 
1477     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1478     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1479 
1480     unlock_user_struct(target_tz, target_tz_addr, 0);
1481 
1482     return 0;
1483 }
1484 
1485 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1486 #include <mqueue.h>
1487 
1488 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1489                                               abi_ulong target_mq_attr_addr)
1490 {
1491     struct target_mq_attr *target_mq_attr;
1492 
1493     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1494                           target_mq_attr_addr, 1))
1495         return -TARGET_EFAULT;
1496 
1497     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1498     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1499     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1500     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1501 
1502     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1503 
1504     return 0;
1505 }
1506 
1507 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1508                                             const struct mq_attr *attr)
1509 {
1510     struct target_mq_attr *target_mq_attr;
1511 
1512     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1513                           target_mq_attr_addr, 0))
1514         return -TARGET_EFAULT;
1515 
1516     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1517     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1518     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1519     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1520 
1521     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1522 
1523     return 0;
1524 }
1525 #endif
1526 
1527 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1528 /* do_select() must return target values and target errnos. */
1529 static abi_long do_select(int n,
1530                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1531                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1532 {
1533     fd_set rfds, wfds, efds;
1534     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1535     struct timeval tv;
1536     struct timespec ts, *ts_ptr;
1537     abi_long ret;
1538 
1539     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1540     if (ret) {
1541         return ret;
1542     }
1543     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1544     if (ret) {
1545         return ret;
1546     }
1547     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1548     if (ret) {
1549         return ret;
1550     }
1551 
1552     if (target_tv_addr) {
1553         if (copy_from_user_timeval(&tv, target_tv_addr))
1554             return -TARGET_EFAULT;
1555         ts.tv_sec = tv.tv_sec;
1556         ts.tv_nsec = tv.tv_usec * 1000;
1557         ts_ptr = &ts;
1558     } else {
1559         ts_ptr = NULL;
1560     }
1561 
1562     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1563                                   ts_ptr, NULL));
1564 
1565     if (!is_error(ret)) {
1566         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1567             return -TARGET_EFAULT;
1568         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1569             return -TARGET_EFAULT;
1570         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1571             return -TARGET_EFAULT;
1572 
1573         if (target_tv_addr) {
1574             tv.tv_sec = ts.tv_sec;
1575             tv.tv_usec = ts.tv_nsec / 1000;
1576             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1577                 return -TARGET_EFAULT;
1578             }
1579         }
1580     }
1581 
1582     return ret;
1583 }
1584 
1585 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1586 static abi_long do_old_select(abi_ulong arg1)
1587 {
1588     struct target_sel_arg_struct *sel;
1589     abi_ulong inp, outp, exp, tvp;
1590     long nsel;
1591 
1592     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1593         return -TARGET_EFAULT;
1594     }
1595 
1596     nsel = tswapal(sel->n);
1597     inp = tswapal(sel->inp);
1598     outp = tswapal(sel->outp);
1599     exp = tswapal(sel->exp);
1600     tvp = tswapal(sel->tvp);
1601 
1602     unlock_user_struct(sel, arg1, 0);
1603 
1604     return do_select(nsel, inp, outp, exp, tvp);
1605 }
1606 #endif
1607 #endif
1608 
1609 static abi_long do_pipe2(int host_pipe[], int flags)
1610 {
1611 #ifdef CONFIG_PIPE2
1612     return pipe2(host_pipe, flags);
1613 #else
1614     return -ENOSYS;
1615 #endif
1616 }
1617 
1618 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1619                         int flags, int is_pipe2)
1620 {
1621     int host_pipe[2];
1622     abi_long ret;
1623     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1624 
1625     if (is_error(ret))
1626         return get_errno(ret);
1627 
1628     /* Several targets have special calling conventions for the original
1629        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1630     if (!is_pipe2) {
1631 #if defined(TARGET_ALPHA)
1632         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_MIPS)
1635         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SH4)
1638         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #elif defined(TARGET_SPARC)
1641         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1642         return host_pipe[0];
1643 #endif
1644     }
1645 
1646     if (put_user_s32(host_pipe[0], pipedes)
1647         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1648         return -TARGET_EFAULT;
1649     return get_errno(ret);
1650 }
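
/* Usage sketch (illustrative, based on how the dispatcher further down in
 * this file is expected to call do_pipe()):
 *
 *     TARGET_NR_pipe:   ret = do_pipe(cpu_env, arg1, 0, 0);
 *     TARGET_NR_pipe2:  ret = do_pipe(cpu_env, arg1,
 *                           target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
 *
 * Only the pipe2 path translates guest open flags, and only the plain pipe
 * path may use the per-target register return convention handled above.
 */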
1651 
1652 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1653                                               abi_ulong target_addr,
1654                                               socklen_t len)
1655 {
1656     struct target_ip_mreqn *target_smreqn;
1657 
1658     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1659     if (!target_smreqn)
1660         return -TARGET_EFAULT;
1661     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1662     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1663     if (len == sizeof(struct target_ip_mreqn))
1664         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1665     unlock_user(target_smreqn, target_addr, 0);
1666 
1667     return 0;
1668 }
1669 
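/* Convert a guest struct sockaddr into host format: byteswap the address
 * family, fix up AF_UNIX lengths that omit the trailing NUL (see below),
 * and swap the multi-byte fields of AF_NETLINK and AF_PACKET addresses.
 * Sockets with a registered fd translator are handled by that translator
 * instead.
 */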
1670 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1671                                                abi_ulong target_addr,
1672                                                socklen_t len)
1673 {
1674     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1675     sa_family_t sa_family;
1676     struct target_sockaddr *target_saddr;
1677 
1678     if (fd_trans_target_to_host_addr(fd)) {
1679         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1680     }
1681 
1682     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1683     if (!target_saddr)
1684         return -TARGET_EFAULT;
1685 
1686     sa_family = tswap16(target_saddr->sa_family);
1687 
1688     /* Oops. The caller might send an incomplete sun_path; sun_path
1689      * must be terminated by \0 (see the manual page), but
1690      * unfortunately it is quite common to specify sockaddr_un
1691      * length as "strlen(x->sun_path)" while it should be
1692      * "strlen(...) + 1". We'll fix that here if needed.
1693      * The Linux kernel applies a similar fixup.
1694      */
1695 
1696     if (sa_family == AF_UNIX) {
1697         if (len < unix_maxlen && len > 0) {
1698             char *cp = (char*)target_saddr;
1699 
1700             if (cp[len - 1] && !cp[len])
1701                 len++;
1702         }
1703         if (len > unix_maxlen)
1704             len = unix_maxlen;
1705     }
1706 
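    /* Worked example (illustrative): a guest passing sun_path = "/tmp/x" with
     * len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/x") arrives
     * here with cp[len - 1] == 'x'; if the byte just past the supplied length
     * is the string's NUL terminator (the common case), len has been bumped by
     * one above so that the terminator is included in the copy below.
     */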
1707     memcpy(addr, target_saddr, len);
1708     addr->sa_family = sa_family;
1709     if (sa_family == AF_NETLINK) {
1710         struct sockaddr_nl *nladdr;
1711 
1712         nladdr = (struct sockaddr_nl *)addr;
1713         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1714         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1715     } else if (sa_family == AF_PACKET) {
1716         struct target_sockaddr_ll *lladdr;
1717 
1718         lladdr = (struct target_sockaddr_ll *)addr;
1719         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1720         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1721     }
1722     unlock_user(target_saddr, target_addr, 0);
1723 
1724     return 0;
1725 }
1726 
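/* Copy a host struct sockaddr out to guest memory, byteswapping the family
 * and the address-family specific fields that are wider than one byte
 * (netlink nl_pid/nl_groups, packet sll_ifindex/sll_hatype, IPv6
 * sin6_scope_id).
 */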
1727 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1728                                                struct sockaddr *addr,
1729                                                socklen_t len)
1730 {
1731     struct target_sockaddr *target_saddr;
1732 
1733     if (len == 0) {
1734         return 0;
1735     }
1736     assert(addr);
1737 
1738     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1739     if (!target_saddr)
1740         return -TARGET_EFAULT;
1741     memcpy(target_saddr, addr, len);
1742     if (len >= offsetof(struct target_sockaddr, sa_family) +
1743         sizeof(target_saddr->sa_family)) {
1744         target_saddr->sa_family = tswap16(addr->sa_family);
1745     }
1746     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1747         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1748         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1749         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1750     } else if (addr->sa_family == AF_PACKET) {
1751         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1752         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1753         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1754     } else if (addr->sa_family == AF_INET6 &&
1755                len >= sizeof(struct target_sockaddr_in6)) {
1756         struct target_sockaddr_in6 *target_in6 =
1757                (struct target_sockaddr_in6 *)target_saddr;
1758         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1759     }
1760     unlock_user(target_saddr, target_addr, len);
1761 
1762     return 0;
1763 }
1764 
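/* Convert the ancillary data (control messages) of a guest sendmsg() into
 * host format: byteswap each cmsghdr and translate the payload types QEMU
 * understands (SCM_RIGHTS file descriptors, SCM_CREDENTIALS); anything else
 * is copied through byte-for-byte with a warning.
 */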
1765 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1766                                            struct target_msghdr *target_msgh)
1767 {
1768     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1769     abi_long msg_controllen;
1770     abi_ulong target_cmsg_addr;
1771     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1772     socklen_t space = 0;
1773 
1774     msg_controllen = tswapal(target_msgh->msg_controllen);
1775     if (msg_controllen < sizeof (struct target_cmsghdr))
1776         goto the_end;
1777     target_cmsg_addr = tswapal(target_msgh->msg_control);
1778     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1779     target_cmsg_start = target_cmsg;
1780     if (!target_cmsg)
1781         return -TARGET_EFAULT;
1782 
1783     while (cmsg && target_cmsg) {
1784         void *data = CMSG_DATA(cmsg);
1785         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1786 
1787         int len = tswapal(target_cmsg->cmsg_len)
1788             - sizeof(struct target_cmsghdr);
1789 
1790         space += CMSG_SPACE(len);
1791         if (space > msgh->msg_controllen) {
1792             space -= CMSG_SPACE(len);
1793             /* This is a QEMU bug, since we allocated the payload
1794              * area ourselves (unlike overflow in host-to-target
1795              * conversion, which is just the guest giving us a buffer
1796              * that's too small). It can't happen for the payload types
1797              * we currently support; if it becomes an issue in future
1798              * we would need to improve our allocation strategy to
1799              * something more intelligent than "twice the size of the
1800              * target buffer we're reading from".
1801              */
1802             gemu_log("Host cmsg overflow\n");
1803             break;
1804         }
1805 
1806         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1807             cmsg->cmsg_level = SOL_SOCKET;
1808         } else {
1809             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1810         }
1811         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1812         cmsg->cmsg_len = CMSG_LEN(len);
1813 
1814         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1815             int *fd = (int *)data;
1816             int *target_fd = (int *)target_data;
1817             int i, numfds = len / sizeof(int);
1818 
1819             for (i = 0; i < numfds; i++) {
1820                 __get_user(fd[i], target_fd + i);
1821             }
1822         } else if (cmsg->cmsg_level == SOL_SOCKET
1823                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1824             struct ucred *cred = (struct ucred *)data;
1825             struct target_ucred *target_cred =
1826                 (struct target_ucred *)target_data;
1827 
1828             __get_user(cred->pid, &target_cred->pid);
1829             __get_user(cred->uid, &target_cred->uid);
1830             __get_user(cred->gid, &target_cred->gid);
1831         } else {
1832             gemu_log("Unsupported ancillary data: %d/%d\n",
1833                                         cmsg->cmsg_level, cmsg->cmsg_type);
1834             memcpy(data, target_data, len);
1835         }
1836 
1837         cmsg = CMSG_NXTHDR(msgh, cmsg);
1838         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1839                                          target_cmsg_start);
1840     }
1841     unlock_user(target_cmsg, target_cmsg_addr, 0);
1842  the_end:
1843     msgh->msg_controllen = space;
1844     return 0;
1845 }
1846 
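/* Convert ancillary data returned by the host (e.g. from recvmsg()) into
 * guest format. The payload may change size between host and target (see
 * SO_TIMESTAMP below) and may be truncated if the guest left too little
 * control buffer space, in which case MSG_CTRUNC is reported to the guest.
 */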
1847 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1848                                            struct msghdr *msgh)
1849 {
1850     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1851     abi_long msg_controllen;
1852     abi_ulong target_cmsg_addr;
1853     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1854     socklen_t space = 0;
1855 
1856     msg_controllen = tswapal(target_msgh->msg_controllen);
1857     if (msg_controllen < sizeof (struct target_cmsghdr))
1858         goto the_end;
1859     target_cmsg_addr = tswapal(target_msgh->msg_control);
1860     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1861     target_cmsg_start = target_cmsg;
1862     if (!target_cmsg)
1863         return -TARGET_EFAULT;
1864 
1865     while (cmsg && target_cmsg) {
1866         void *data = CMSG_DATA(cmsg);
1867         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1868 
1869         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1870         int tgt_len, tgt_space;
1871 
1872         /* We never copy a half-header but may copy half-data;
1873          * this is Linux's behaviour in put_cmsg(). Note that
1874          * truncation here is a guest problem (which we report
1875          * to the guest via the CTRUNC bit), unlike truncation
1876          * in target_to_host_cmsg, which is a QEMU bug.
1877          */
1878         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1879             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1880             break;
1881         }
1882 
1883         if (cmsg->cmsg_level == SOL_SOCKET) {
1884             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1885         } else {
1886             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1887         }
1888         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1889 
1890         /* Payload types which need a different size of payload on
1891          * the target must adjust tgt_len here.
1892          */
1893         tgt_len = len;
1894         switch (cmsg->cmsg_level) {
1895         case SOL_SOCKET:
1896             switch (cmsg->cmsg_type) {
1897             case SO_TIMESTAMP:
1898                 tgt_len = sizeof(struct target_timeval);
1899                 break;
1900             default:
1901                 break;
1902             }
1903             break;
1904         default:
1905             break;
1906         }
1907 
1908         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1909             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1910             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1911         }
1912 
1913         /* We must now copy-and-convert len bytes of payload
1914          * into tgt_len bytes of destination space. Bear in mind
1915          * that in both source and destination we may be dealing
1916          * with a truncated value!
1917          */
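        /* Worked example (illustrative): SCM_RIGHTS carrying three fds has
         * len == 12; if the remaining guest control space only covers
         * TARGET_CMSG_LEN(8), tgt_len is cut to 8, MSG_CTRUNC is set above,
         * and only the first two descriptors are copied below. SO_TIMESTAMP
         * shows why tgt_len can differ from len even without truncation: a
         * 64-bit host's struct timeval is 16 bytes, a 32-bit guest's
         * target_timeval is 8.
         */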
1918         switch (cmsg->cmsg_level) {
1919         case SOL_SOCKET:
1920             switch (cmsg->cmsg_type) {
1921             case SCM_RIGHTS:
1922             {
1923                 int *fd = (int *)data;
1924                 int *target_fd = (int *)target_data;
1925                 int i, numfds = tgt_len / sizeof(int);
1926 
1927                 for (i = 0; i < numfds; i++) {
1928                     __put_user(fd[i], target_fd + i);
1929                 }
1930                 break;
1931             }
1932             case SO_TIMESTAMP:
1933             {
1934                 struct timeval *tv = (struct timeval *)data;
1935                 struct target_timeval *target_tv =
1936                     (struct target_timeval *)target_data;
1937 
1938                 if (len != sizeof(struct timeval) ||
1939                     tgt_len != sizeof(struct target_timeval)) {
1940                     goto unimplemented;
1941                 }
1942 
1943                 /* copy struct timeval to target */
1944                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1945                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1946                 break;
1947             }
1948             case SCM_CREDENTIALS:
1949             {
1950                 struct ucred *cred = (struct ucred *)data;
1951                 struct target_ucred *target_cred =
1952                     (struct target_ucred *)target_data;
1953 
1954                 __put_user(cred->pid, &target_cred->pid);
1955                 __put_user(cred->uid, &target_cred->uid);
1956                 __put_user(cred->gid, &target_cred->gid);
1957                 break;
1958             }
1959             default:
1960                 goto unimplemented;
1961             }
1962             break;
1963 
1964         case SOL_IP:
1965             switch (cmsg->cmsg_type) {
1966             case IP_TTL:
1967             {
1968                 uint32_t *v = (uint32_t *)data;
1969                 uint32_t *t_int = (uint32_t *)target_data;
1970 
1971                 if (len != sizeof(uint32_t) ||
1972                     tgt_len != sizeof(uint32_t)) {
1973                     goto unimplemented;
1974                 }
1975                 __put_user(*v, t_int);
1976                 break;
1977             }
1978             case IP_RECVERR:
1979             {
1980                 struct errhdr_t {
1981                    struct sock_extended_err ee;
1982                    struct sockaddr_in offender;
1983                 };
1984                 struct errhdr_t *errh = (struct errhdr_t *)data;
1985                 struct errhdr_t *target_errh =
1986                     (struct errhdr_t *)target_data;
1987 
1988                 if (len != sizeof(struct errhdr_t) ||
1989                     tgt_len != sizeof(struct errhdr_t)) {
1990                     goto unimplemented;
1991                 }
1992                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1993                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1994                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1995                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1996                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1997                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1998                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1999                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2000                     (void *) &errh->offender, sizeof(errh->offender));
2001                 break;
2002             }
2003             default:
2004                 goto unimplemented;
2005             }
2006             break;
2007 
2008         case SOL_IPV6:
2009             switch (cmsg->cmsg_type) {
2010             case IPV6_HOPLIMIT:
2011             {
2012                 uint32_t *v = (uint32_t *)data;
2013                 uint32_t *t_int = (uint32_t *)target_data;
2014 
2015                 if (len != sizeof(uint32_t) ||
2016                     tgt_len != sizeof(uint32_t)) {
2017                     goto unimplemented;
2018                 }
2019                 __put_user(*v, t_int);
2020                 break;
2021             }
2022             case IPV6_RECVERR:
2023             {
2024                 struct errhdr6_t {
2025                    struct sock_extended_err ee;
2026                    struct sockaddr_in6 offender;
2027                 };
2028                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2029                 struct errhdr6_t *target_errh =
2030                     (struct errhdr6_t *)target_data;
2031 
2032                 if (len != sizeof(struct errhdr6_t) ||
2033                     tgt_len != sizeof(struct errhdr6_t)) {
2034                     goto unimplemented;
2035                 }
2036                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2037                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2038                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2039                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2040                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2041                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2042                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2043                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2044                     (void *) &errh->offender, sizeof(errh->offender));
2045                 break;
2046             }
2047             default:
2048                 goto unimplemented;
2049             }
2050             break;
2051 
2052         default:
2053         unimplemented:
2054             gemu_log("Unsupported ancillary data: %d/%d\n",
2055                                         cmsg->cmsg_level, cmsg->cmsg_type);
2056             memcpy(target_data, data, MIN(len, tgt_len));
2057             if (tgt_len > len) {
2058                 memset(target_data + len, 0, tgt_len - len);
2059             }
2060         }
2061 
2062         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2063         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2064         if (msg_controllen < tgt_space) {
2065             tgt_space = msg_controllen;
2066         }
2067         msg_controllen -= tgt_space;
2068         space += tgt_space;
2069         cmsg = CMSG_NXTHDR(msgh, cmsg);
2070         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2071                                          target_cmsg_start);
2072     }
2073     unlock_user(target_cmsg, target_cmsg_addr, space);
2074  the_end:
2075     target_msgh->msg_controllen = tswapal(space);
2076     return 0;
2077 }
2078 
2079 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2080 {
2081     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2082     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2083     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2084     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2085     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2086 }
2087 
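/* Walk a buffer of netlink messages, byteswapping each nlmsghdr and handing
 * the payload of anything other than DONE/NOOP/ERROR to the per-protocol
 * conversion callback. Note the ordering: on the host-to-target path the
 * header is swapped only after its payload has been converted, while the
 * target-to-host walker below swaps the header first so the callback sees
 * host-endian header fields.
 */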
2088 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2089                                               size_t len,
2090                                               abi_long (*host_to_target_nlmsg)
2091                                                        (struct nlmsghdr *))
2092 {
2093     uint32_t nlmsg_len;
2094     abi_long ret;
2095 
2096     while (len > sizeof(struct nlmsghdr)) {
2097 
2098         nlmsg_len = nlh->nlmsg_len;
2099         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2100             nlmsg_len > len) {
2101             break;
2102         }
2103 
2104         switch (nlh->nlmsg_type) {
2105         case NLMSG_DONE:
2106             tswap_nlmsghdr(nlh);
2107             return 0;
2108         case NLMSG_NOOP:
2109             break;
2110         case NLMSG_ERROR:
2111         {
2112             struct nlmsgerr *e = NLMSG_DATA(nlh);
2113             e->error = tswap32(e->error);
2114             tswap_nlmsghdr(&e->msg);
2115             tswap_nlmsghdr(nlh);
2116             return 0;
2117         }
2118         default:
2119             ret = host_to_target_nlmsg(nlh);
2120             if (ret < 0) {
2121                 tswap_nlmsghdr(nlh);
2122                 return ret;
2123             }
2124             break;
2125         }
2126         tswap_nlmsghdr(nlh);
2127         len -= NLMSG_ALIGN(nlmsg_len);
2128         nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
2129     }
2130     return 0;
2131 }
2132 
2133 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2134                                               size_t len,
2135                                               abi_long (*target_to_host_nlmsg)
2136                                                        (struct nlmsghdr *))
2137 {
2138     int ret;
2139 
2140     while (len > sizeof(struct nlmsghdr)) {
2141         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2142             tswap32(nlh->nlmsg_len) > len) {
2143             break;
2144         }
2145         tswap_nlmsghdr(nlh);
2146         switch (nlh->nlmsg_type) {
2147         case NLMSG_DONE:
2148             return 0;
2149         case NLMSG_NOOP:
2150             break;
2151         case NLMSG_ERROR:
2152         {
2153             struct nlmsgerr *e = NLMSG_DATA(nlh);
2154             e->error = tswap32(e->error);
2155             tswap_nlmsghdr(&e->msg);
2156             return 0;
2157         }
2158         default:
2159             ret = target_to_host_nlmsg(nlh);
2160             if (ret < 0) {
2161                 return ret;
2162             }
2163         }
2164         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2165         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2166     }
2167     return 0;
2168 }
2169 
2170 #ifdef CONFIG_RTNETLINK
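/* The rtnetlink converters below all follow the same pattern: walk a chain
 * of attributes (struct nlattr / struct rtattr), let a type-specific callback
 * byteswap the attribute value in place, then byteswap the attribute header
 * itself and advance by the aligned attribute length.
 */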
2171 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2172                                                size_t len, void *context,
2173                                                abi_long (*host_to_target_nlattr)
2174                                                         (struct nlattr *,
2175                                                          void *context))
2176 {
2177     unsigned short nla_len;
2178     abi_long ret;
2179 
2180     while (len > sizeof(struct nlattr)) {
2181         nla_len = nlattr->nla_len;
2182         if (nla_len < sizeof(struct nlattr) ||
2183             nla_len > len) {
2184             break;
2185         }
2186         ret = host_to_target_nlattr(nlattr, context);
2187         nlattr->nla_len = tswap16(nlattr->nla_len);
2188         nlattr->nla_type = tswap16(nlattr->nla_type);
2189         if (ret < 0) {
2190             return ret;
2191         }
2192         len -= NLA_ALIGN(nla_len);
2193         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2194     }
2195     return 0;
2196 }
2197 
2198 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2199                                                size_t len,
2200                                                abi_long (*host_to_target_rtattr)
2201                                                         (struct rtattr *))
2202 {
2203     unsigned short rta_len;
2204     abi_long ret;
2205 
2206     while (len > sizeof(struct rtattr)) {
2207         rta_len = rtattr->rta_len;
2208         if (rta_len < sizeof(struct rtattr) ||
2209             rta_len > len) {
2210             break;
2211         }
2212         ret = host_to_target_rtattr(rtattr);
2213         rtattr->rta_len = tswap16(rtattr->rta_len);
2214         rtattr->rta_type = tswap16(rtattr->rta_type);
2215         if (ret < 0) {
2216             return ret;
2217         }
2218         len -= RTA_ALIGN(rta_len);
2219         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2220     }
2221     return 0;
2222 }
2223 
2224 #define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2225 
2226 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2227                                                   void *context)
2228 {
2229     uint16_t *u16;
2230     uint32_t *u32;
2231     uint64_t *u64;
2232 
2233     switch (nlattr->nla_type) {
2234     /* no data */
2235     case QEMU_IFLA_BR_FDB_FLUSH:
2236         break;
2237     /* binary */
2238     case QEMU_IFLA_BR_GROUP_ADDR:
2239         break;
2240     /* uint8_t */
2241     case QEMU_IFLA_BR_VLAN_FILTERING:
2242     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2243     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2244     case QEMU_IFLA_BR_MCAST_ROUTER:
2245     case QEMU_IFLA_BR_MCAST_SNOOPING:
2246     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2247     case QEMU_IFLA_BR_MCAST_QUERIER:
2248     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2249     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2250     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2251     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2252     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2253     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2254     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2255         break;
2256     /* uint16_t */
2257     case QEMU_IFLA_BR_PRIORITY:
2258     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2259     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2260     case QEMU_IFLA_BR_ROOT_PORT:
2261     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2262         u16 = NLA_DATA(nlattr);
2263         *u16 = tswap16(*u16);
2264         break;
2265     /* uint32_t */
2266     case QEMU_IFLA_BR_FORWARD_DELAY:
2267     case QEMU_IFLA_BR_HELLO_TIME:
2268     case QEMU_IFLA_BR_MAX_AGE:
2269     case QEMU_IFLA_BR_AGEING_TIME:
2270     case QEMU_IFLA_BR_STP_STATE:
2271     case QEMU_IFLA_BR_ROOT_PATH_COST:
2272     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2273     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2274     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2275     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2276         u32 = NLA_DATA(nlattr);
2277         *u32 = tswap32(*u32);
2278         break;
2279     /* uint64_t */
2280     case QEMU_IFLA_BR_HELLO_TIMER:
2281     case QEMU_IFLA_BR_TCN_TIMER:
2282     case QEMU_IFLA_BR_GC_TIMER:
2283     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2284     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2285     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2286     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2287     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2288     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2289     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2290         u64 = NLA_DATA(nlattr);
2291         *u64 = tswap64(*u64);
2292         break;
2293     /* ifla_bridge_id: uint8_t[] */
2294     case QEMU_IFLA_BR_ROOT_ID:
2295     case QEMU_IFLA_BR_BRIDGE_ID:
2296         break;
2297     default:
2298         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2299         break;
2300     }
2301     return 0;
2302 }
2303 
2304 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2305                                                         void *context)
2306 {
2307     uint16_t *u16;
2308     uint32_t *u32;
2309     uint64_t *u64;
2310 
2311     switch (nlattr->nla_type) {
2312     /* uint8_t */
2313     case QEMU_IFLA_BRPORT_STATE:
2314     case QEMU_IFLA_BRPORT_MODE:
2315     case QEMU_IFLA_BRPORT_GUARD:
2316     case QEMU_IFLA_BRPORT_PROTECT:
2317     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2318     case QEMU_IFLA_BRPORT_LEARNING:
2319     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2320     case QEMU_IFLA_BRPORT_PROXYARP:
2321     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2322     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2323     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2324     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2325     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2326     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2327     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2328     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2329     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2330     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2331         break;
2332     /* uint16_t */
2333     case QEMU_IFLA_BRPORT_PRIORITY:
2334     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2335     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2336     case QEMU_IFLA_BRPORT_ID:
2337     case QEMU_IFLA_BRPORT_NO:
2338     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2339         u16 = NLA_DATA(nlattr);
2340         *u16 = tswap16(*u16);
2341         break;
2342     /* uint32_t */
2343     case QEMU_IFLA_BRPORT_COST:
2344         u32 = NLA_DATA(nlattr);
2345         *u32 = tswap32(*u32);
2346         break;
2347     /* uint64_t */
2348     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2349     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2350     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2351         u64 = NLA_DATA(nlattr);
2352         *u64 = tswap64(*u64);
2353         break;
2354     /* ifla_bridge_id: uint8_t[] */
2355     case QEMU_IFLA_BRPORT_ROOT_ID:
2356     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2357         break;
2358     default:
2359         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2360         break;
2361     }
2362     return 0;
2363 }
2364 
2365 static abi_long host_to_target_data_tun_nlattr(struct nlattr *nlattr,
2366                                                   void *context)
2367 {
2368     uint32_t *u32;
2369 
2370     switch (nlattr->nla_type) {
2371     /* uint8_t */
2372     case QEMU_IFLA_TUN_TYPE:
2373     case QEMU_IFLA_TUN_PI:
2374     case QEMU_IFLA_TUN_VNET_HDR:
2375     case QEMU_IFLA_TUN_PERSIST:
2376     case QEMU_IFLA_TUN_MULTI_QUEUE:
2377         break;
2378     /* uint32_t */
2379     case QEMU_IFLA_TUN_NUM_QUEUES:
2380     case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES:
2381     case QEMU_IFLA_TUN_OWNER:
2382     case QEMU_IFLA_TUN_GROUP:
2383         u32 = NLA_DATA(nlattr);
2384         *u32 = tswap32(*u32);
2385         break;
2386     default:
2387         gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr->nla_type);
2388         break;
2389     }
2390     return 0;
2391 }
2392 
2393 struct linkinfo_context {
2394     int len;
2395     char *name;
2396     int slave_len;
2397     char *slave_name;
2398 };
2399 
2400 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2401                                                     void *context)
2402 {
2403     struct linkinfo_context *li_context = context;
2404 
2405     switch (nlattr->nla_type) {
2406     /* string */
2407     case QEMU_IFLA_INFO_KIND:
2408         li_context->name = NLA_DATA(nlattr);
2409         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2410         break;
2411     case QEMU_IFLA_INFO_SLAVE_KIND:
2412         li_context->slave_name = NLA_DATA(nlattr);
2413         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2414         break;
2415     /* stats */
2416     case QEMU_IFLA_INFO_XSTATS:
2417         /* FIXME: only used by CAN */
2418         break;
2419     /* nested */
2420     case QEMU_IFLA_INFO_DATA:
2421         if (strncmp(li_context->name, "bridge",
2422                     li_context->len) == 0) {
2423             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2424                                                   nlattr->nla_len,
2425                                                   NULL,
2426                                              host_to_target_data_bridge_nlattr);
2427         } else if (strncmp(li_context->name, "tun",
2428                     li_context->len) == 0) {
2429             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2430                                                   nlattr->nla_len,
2431                                                   NULL,
2432                                                 host_to_target_data_tun_nlattr);
2433         } else {
2434             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2435         }
2436         break;
2437     case QEMU_IFLA_INFO_SLAVE_DATA:
2438         if (strncmp(li_context->slave_name, "bridge",
2439                     li_context->slave_len) == 0) {
2440             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2441                                                   nlattr->nla_len,
2442                                                   NULL,
2443                                        host_to_target_slave_data_bridge_nlattr);
2444         } else {
2445             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2446                      li_context->slave_name);
2447         }
2448         break;
2449     default:
2450         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2451         break;
2452     }
2453 
2454     return 0;
2455 }
2456 
2457 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2458                                                 void *context)
2459 {
2460     uint32_t *u32;
2461     int i;
2462 
2463     switch (nlattr->nla_type) {
2464     case QEMU_IFLA_INET_CONF:
2465         u32 = NLA_DATA(nlattr);
2466         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2467              i++) {
2468             u32[i] = tswap32(u32[i]);
2469         }
2470         break;
2471     default:
2472         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2473     }
2474     return 0;
2475 }
2476 
2477 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2478                                                 void *context)
2479 {
2480     uint32_t *u32;
2481     uint64_t *u64;
2482     struct ifla_cacheinfo *ci;
2483     int i;
2484 
2485     switch (nlattr->nla_type) {
2486     /* binary */
2487     case QEMU_IFLA_INET6_TOKEN:
2488         break;
2489     /* uint8_t */
2490     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2491         break;
2492     /* uint32_t */
2493     case QEMU_IFLA_INET6_FLAGS:
2494         u32 = NLA_DATA(nlattr);
2495         *u32 = tswap32(*u32);
2496         break;
2497     /* uint32_t[] */
2498     case QEMU_IFLA_INET6_CONF:
2499         u32 = NLA_DATA(nlattr);
2500         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2501              i++) {
2502             u32[i] = tswap32(u32[i]);
2503         }
2504         break;
2505     /* ifla_cacheinfo */
2506     case QEMU_IFLA_INET6_CACHEINFO:
2507         ci = NLA_DATA(nlattr);
2508         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2509         ci->tstamp = tswap32(ci->tstamp);
2510         ci->reachable_time = tswap32(ci->reachable_time);
2511         ci->retrans_time = tswap32(ci->retrans_time);
2512         break;
2513     /* uint64_t[] */
2514     case QEMU_IFLA_INET6_STATS:
2515     case QEMU_IFLA_INET6_ICMP6STATS:
2516         u64 = NLA_DATA(nlattr);
2517         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2518              i++) {
2519             u64[i] = tswap64(u64[i]);
2520         }
2521         break;
2522     default:
2523         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2524     }
2525     return 0;
2526 }
2527 
2528 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2529                                                     void *context)
2530 {
2531     switch (nlattr->nla_type) {
2532     case AF_INET:
2533         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2534                                               NULL,
2535                                              host_to_target_data_inet_nlattr);
2536     case AF_INET6:
2537         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2538                                               NULL,
2539                                              host_to_target_data_inet6_nlattr);
2540     default:
2541         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2542         break;
2543     }
2544     return 0;
2545 }
2546 
2547 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2548                                                void *context)
2549 {
2550     uint32_t *u32;
2551 
2552     switch (nlattr->nla_type) {
2553     /* uint8_t */
2554     case QEMU_IFLA_XDP_ATTACHED:
2555         break;
2556     /* uint32_t */
2557     case QEMU_IFLA_XDP_PROG_ID:
2558         u32 = NLA_DATA(nlattr);
2559         *u32 = tswap32(*u32);
2560         break;
2561     default:
2562         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2563         break;
2564     }
2565     return 0;
2566 }
2567 
2568 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2569 {
2570     uint32_t *u32;
2571     struct rtnl_link_stats *st;
2572     struct rtnl_link_stats64 *st64;
2573     struct rtnl_link_ifmap *map;
2574     struct linkinfo_context li_context;
2575 
2576     switch (rtattr->rta_type) {
2577     /* binary stream */
2578     case QEMU_IFLA_ADDRESS:
2579     case QEMU_IFLA_BROADCAST:
2580     /* string */
2581     case QEMU_IFLA_IFNAME:
2582     case QEMU_IFLA_QDISC:
2583         break;
2584     /* uint8_t */
2585     case QEMU_IFLA_OPERSTATE:
2586     case QEMU_IFLA_LINKMODE:
2587     case QEMU_IFLA_CARRIER:
2588     case QEMU_IFLA_PROTO_DOWN:
2589         break;
2590     /* uint32_t */
2591     case QEMU_IFLA_MTU:
2592     case QEMU_IFLA_LINK:
2593     case QEMU_IFLA_WEIGHT:
2594     case QEMU_IFLA_TXQLEN:
2595     case QEMU_IFLA_CARRIER_CHANGES:
2596     case QEMU_IFLA_NUM_RX_QUEUES:
2597     case QEMU_IFLA_NUM_TX_QUEUES:
2598     case QEMU_IFLA_PROMISCUITY:
2599     case QEMU_IFLA_EXT_MASK:
2600     case QEMU_IFLA_LINK_NETNSID:
2601     case QEMU_IFLA_GROUP:
2602     case QEMU_IFLA_MASTER:
2603     case QEMU_IFLA_NUM_VF:
2604     case QEMU_IFLA_GSO_MAX_SEGS:
2605     case QEMU_IFLA_GSO_MAX_SIZE:
2606     case QEMU_IFLA_CARRIER_UP_COUNT:
2607     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2608         u32 = RTA_DATA(rtattr);
2609         *u32 = tswap32(*u32);
2610         break;
2611     /* struct rtnl_link_stats */
2612     case QEMU_IFLA_STATS:
2613         st = RTA_DATA(rtattr);
2614         st->rx_packets = tswap32(st->rx_packets);
2615         st->tx_packets = tswap32(st->tx_packets);
2616         st->rx_bytes = tswap32(st->rx_bytes);
2617         st->tx_bytes = tswap32(st->tx_bytes);
2618         st->rx_errors = tswap32(st->rx_errors);
2619         st->tx_errors = tswap32(st->tx_errors);
2620         st->rx_dropped = tswap32(st->rx_dropped);
2621         st->tx_dropped = tswap32(st->tx_dropped);
2622         st->multicast = tswap32(st->multicast);
2623         st->collisions = tswap32(st->collisions);
2624 
2625         /* detailed rx_errors: */
2626         st->rx_length_errors = tswap32(st->rx_length_errors);
2627         st->rx_over_errors = tswap32(st->rx_over_errors);
2628         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2629         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2630         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2631         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2632 
2633         /* detailed tx_errors */
2634         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2635         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2636         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2637         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2638         st->tx_window_errors = tswap32(st->tx_window_errors);
2639 
2640         /* for cslip etc */
2641         st->rx_compressed = tswap32(st->rx_compressed);
2642         st->tx_compressed = tswap32(st->tx_compressed);
2643         break;
2644     /* struct rtnl_link_stats64 */
2645     case QEMU_IFLA_STATS64:
2646         st64 = RTA_DATA(rtattr);
2647         st64->rx_packets = tswap64(st64->rx_packets);
2648         st64->tx_packets = tswap64(st64->tx_packets);
2649         st64->rx_bytes = tswap64(st64->rx_bytes);
2650         st64->tx_bytes = tswap64(st64->tx_bytes);
2651         st64->rx_errors = tswap64(st64->rx_errors);
2652         st64->tx_errors = tswap64(st64->tx_errors);
2653         st64->rx_dropped = tswap64(st64->rx_dropped);
2654         st64->tx_dropped = tswap64(st64->tx_dropped);
2655         st64->multicast = tswap64(st64->multicast);
2656         st64->collisions = tswap64(st64->collisions);
2657 
2658         /* detailed rx_errors: */
2659         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2660         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2661         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2662         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2663         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2664         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2665 
2666         /* detailed tx_errors */
2667         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2668         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2669         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2670         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2671         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2672 
2673         /* for cslip etc */
2674         st64->rx_compressed = tswap64(st64->rx_compressed);
2675         st64->tx_compressed = tswap64(st64->tx_compressed);
2676         break;
2677     /* struct rtnl_link_ifmap */
2678     case QEMU_IFLA_MAP:
2679         map = RTA_DATA(rtattr);
2680         map->mem_start = tswap64(map->mem_start);
2681         map->mem_end = tswap64(map->mem_end);
2682         map->base_addr = tswap64(map->base_addr);
2683         map->irq = tswap16(map->irq);
2684         break;
2685     /* nested */
2686     case QEMU_IFLA_LINKINFO:
2687         memset(&li_context, 0, sizeof(li_context));
2688         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2689                                               &li_context,
2690                                            host_to_target_data_linkinfo_nlattr);
2691     case QEMU_IFLA_AF_SPEC:
2692         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2693                                               NULL,
2694                                              host_to_target_data_spec_nlattr);
2695     case QEMU_IFLA_XDP:
2696         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2697                                               NULL,
2698                                                 host_to_target_data_xdp_nlattr);
2699     default:
2700         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2701         break;
2702     }
2703     return 0;
2704 }
2705 
2706 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2707 {
2708     uint32_t *u32;
2709     struct ifa_cacheinfo *ci;
2710 
2711     switch (rtattr->rta_type) {
2712     /* binary: depends on family type */
2713     case IFA_ADDRESS:
2714     case IFA_LOCAL:
2715         break;
2716     /* string */
2717     case IFA_LABEL:
2718         break;
2719     /* u32 */
2720     case IFA_FLAGS:
2721     case IFA_BROADCAST:
2722         u32 = RTA_DATA(rtattr);
2723         *u32 = tswap32(*u32);
2724         break;
2725     /* struct ifa_cacheinfo */
2726     case IFA_CACHEINFO:
2727         ci = RTA_DATA(rtattr);
2728         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2729         ci->ifa_valid = tswap32(ci->ifa_valid);
2730         ci->cstamp = tswap32(ci->cstamp);
2731         ci->tstamp = tswap32(ci->tstamp);
2732         break;
2733     default:
2734         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2735         break;
2736     }
2737     return 0;
2738 }
2739 
2740 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2741 {
2742     uint32_t *u32;
2743     struct rta_cacheinfo *ci;
2744 
2745     switch (rtattr->rta_type) {
2746     /* binary: depends on family type */
2747     case QEMU_RTA_GATEWAY:
2748     case QEMU_RTA_DST:
2749     case QEMU_RTA_PREFSRC:
2750         break;
2751     /* u8 */
2752     case QEMU_RTA_PREF:
2753         break;
2754     /* u32 */
2755     case QEMU_RTA_PRIORITY:
2756     case QEMU_RTA_TABLE:
2757     case QEMU_RTA_OIF:
2758         u32 = RTA_DATA(rtattr);
2759         *u32 = tswap32(*u32);
2760         break;
2761     /* struct rta_cacheinfo */
2762     case QEMU_RTA_CACHEINFO:
2763         ci = RTA_DATA(rtattr);
2764         ci->rta_clntref = tswap32(ci->rta_clntref);
2765         ci->rta_lastuse = tswap32(ci->rta_lastuse);
2766         ci->rta_expires = tswap32(ci->rta_expires);
2767         ci->rta_error = tswap32(ci->rta_error);
2768         ci->rta_used = tswap32(ci->rta_used);
2769 #if defined(RTNETLINK_HAVE_PEERINFO)
2770         ci->rta_id = tswap32(ci->rta_id);
2771         ci->rta_ts = tswap32(ci->rta_ts);
2772         ci->rta_tsage = tswap32(ci->rta_tsage);
2773 #endif
2774         break;
2775     default:
2776         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2777         break;
2778     }
2779     return 0;
2780 }
2781 
2782 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2783                                          uint32_t rtattr_len)
2784 {
2785     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2786                                           host_to_target_data_link_rtattr);
2787 }
2788 
2789 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2790                                          uint32_t rtattr_len)
2791 {
2792     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2793                                           host_to_target_data_addr_rtattr);
2794 }
2795 
2796 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2797                                          uint32_t rtattr_len)
2798 {
2799     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2800                                           host_to_target_data_route_rtattr);
2801 }
2802 
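/* Byteswap the fixed header of a routing netlink message (RTM_*LINK,
 * RTM_*ADDR, RTM_*ROUTE) and then convert its trailing rtattr chain with the
 * matching helper above.
 */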
2803 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2804 {
2805     uint32_t nlmsg_len;
2806     struct ifinfomsg *ifi;
2807     struct ifaddrmsg *ifa;
2808     struct rtmsg *rtm;
2809 
2810     nlmsg_len = nlh->nlmsg_len;
2811     switch (nlh->nlmsg_type) {
2812     case RTM_NEWLINK:
2813     case RTM_DELLINK:
2814     case RTM_GETLINK:
2815         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2816             ifi = NLMSG_DATA(nlh);
2817             ifi->ifi_type = tswap16(ifi->ifi_type);
2818             ifi->ifi_index = tswap32(ifi->ifi_index);
2819             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2820             ifi->ifi_change = tswap32(ifi->ifi_change);
2821             host_to_target_link_rtattr(IFLA_RTA(ifi),
2822                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2823         }
2824         break;
2825     case RTM_NEWADDR:
2826     case RTM_DELADDR:
2827     case RTM_GETADDR:
2828         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2829             ifa = NLMSG_DATA(nlh);
2830             ifa->ifa_index = tswap32(ifa->ifa_index);
2831             host_to_target_addr_rtattr(IFA_RTA(ifa),
2832                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2833         }
2834         break;
2835     case RTM_NEWROUTE:
2836     case RTM_DELROUTE:
2837     case RTM_GETROUTE:
2838         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2839             rtm = NLMSG_DATA(nlh);
2840             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2841             host_to_target_route_rtattr(RTM_RTA(rtm),
2842                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2843         }
2844         break;
2845     default:
2846         return -TARGET_EINVAL;
2847     }
2848     return 0;
2849 }
2850 
2851 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2852                                                   size_t len)
2853 {
2854     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2855 }
2856 
2857 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2858                                                size_t len,
2859                                                abi_long (*target_to_host_rtattr)
2860                                                         (struct rtattr *))
2861 {
2862     abi_long ret;
2863 
2864     while (len >= sizeof(struct rtattr)) {
2865         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2866             tswap16(rtattr->rta_len) > len) {
2867             break;
2868         }
2869         rtattr->rta_len = tswap16(rtattr->rta_len);
2870         rtattr->rta_type = tswap16(rtattr->rta_type);
2871         ret = target_to_host_rtattr(rtattr);
2872         if (ret < 0) {
2873             return ret;
2874         }
2875         len -= RTA_ALIGN(rtattr->rta_len);
2876         rtattr = (struct rtattr *)(((char *)rtattr) +
2877                  RTA_ALIGN(rtattr->rta_len));
2878     }
2879     return 0;
2880 }
2881 
2882 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2883 {
2884     switch (rtattr->rta_type) {
2885     default:
2886         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2887         break;
2888     }
2889     return 0;
2890 }
2891 
2892 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2893 {
2894     switch (rtattr->rta_type) {
2895     /* binary: depends on family type */
2896     case IFA_LOCAL:
2897     case IFA_ADDRESS:
2898         break;
2899     default:
2900         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2901         break;
2902     }
2903     return 0;
2904 }
2905 
2906 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2907 {
2908     uint32_t *u32;
2909     switch (rtattr->rta_type) {
2910     /* binary: depends on family type */
2911     case QEMU_RTA_DST:
2912     case QEMU_RTA_SRC:
2913     case QEMU_RTA_GATEWAY:
2914         break;
2915     /* u32 */
2916     case QEMU_RTA_PRIORITY:
2917     case QEMU_RTA_OIF:
2918         u32 = RTA_DATA(rtattr);
2919         *u32 = tswap32(*u32);
2920         break;
2921     default:
2922         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2923         break;
2924     }
2925     return 0;
2926 }
2927 
2928 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2929                                        uint32_t rtattr_len)
2930 {
2931     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2932                                    target_to_host_data_link_rtattr);
2933 }
2934 
2935 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2936                                      uint32_t rtattr_len)
2937 {
2938     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2939                                    target_to_host_data_addr_rtattr);
2940 }
2941 
2942 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2943                                      uint32_t rtattr_len)
2944 {
2945     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2946                                    target_to_host_data_route_rtattr);
2947 }
2948 
2949 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2950 {
2951     struct ifinfomsg *ifi;
2952     struct ifaddrmsg *ifa;
2953     struct rtmsg *rtm;
2954 
2955     switch (nlh->nlmsg_type) {
2956     case RTM_GETLINK:
2957         break;
2958     case RTM_NEWLINK:
2959     case RTM_DELLINK:
2960         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2961             ifi = NLMSG_DATA(nlh);
2962             ifi->ifi_type = tswap16(ifi->ifi_type);
2963             ifi->ifi_index = tswap32(ifi->ifi_index);
2964             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2965             ifi->ifi_change = tswap32(ifi->ifi_change);
2966             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2967                                        NLMSG_LENGTH(sizeof(*ifi)));
2968         }
2969         break;
2970     case RTM_GETADDR:
2971     case RTM_NEWADDR:
2972     case RTM_DELADDR:
2973         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2974             ifa = NLMSG_DATA(nlh);
2975             ifa->ifa_index = tswap32(ifa->ifa_index);
2976             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2977                                        NLMSG_LENGTH(sizeof(*ifa)));
2978         }
2979         break;
2980     case RTM_GETROUTE:
2981         break;
2982     case RTM_NEWROUTE:
2983     case RTM_DELROUTE:
2984         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2985             rtm = NLMSG_DATA(nlh);
2986             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2987             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2988                                         NLMSG_LENGTH(sizeof(*rtm)));
2989         }
2990         break;
2991     default:
2992         return -TARGET_EOPNOTSUPP;
2993     }
2994     return 0;
2995 }
2996 
2997 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2998 {
2999     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
3000 }
3001 #endif /* CONFIG_RTNETLINK */
3002 
3003 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
3004 {
3005     switch (nlh->nlmsg_type) {
3006     default:
3007         gemu_log("Unknown host audit message type %d\n",
3008                  nlh->nlmsg_type);
3009         return -TARGET_EINVAL;
3010     }
3011     return 0;
3012 }
3013 
3014 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
3015                                                   size_t len)
3016 {
3017     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
3018 }
3019 
3020 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
3021 {
3022     switch (nlh->nlmsg_type) {
3023     case AUDIT_USER:
3024     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
3025     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
3026         break;
3027     default:
3028         gemu_log("Unknown target audit message type %d\n",
3029                  nlh->nlmsg_type);
3030         return -TARGET_EINVAL;
3031     }
3032 
3033     return 0;
3034 }
3035 
3036 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
3037 {
3038     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
3039 }
3040 
3041 /* do_setsockopt() must return target values and target errnos. */
3042 static abi_long do_setsockopt(int sockfd, int level, int optname,
3043                               abi_ulong optval_addr, socklen_t optlen)
3044 {
3045     abi_long ret;
3046     int val;
3047     struct ip_mreqn *ip_mreq;
3048     struct ip_mreq_source *ip_mreq_source;
3049 
3050     switch(level) {
3051     case SOL_TCP:
3052         /* TCP options all take an 'int' value.  */
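        /* e.g. a guest setsockopt(fd, SOL_TCP, TCP_NODELAY, &one, sizeof(int))
         * is forwarded unchanged, on the assumption that TCP option numbers
         * match between target and host.
         */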
3053         if (optlen < sizeof(uint32_t))
3054             return -TARGET_EINVAL;
3055 
3056         if (get_user_u32(val, optval_addr))
3057             return -TARGET_EFAULT;
3058         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
3059         break;
3060     case SOL_IP:
3061         switch(optname) {
3062         case IP_TOS:
3063         case IP_TTL:
3064         case IP_HDRINCL:
3065         case IP_ROUTER_ALERT:
3066         case IP_RECVOPTS:
3067         case IP_RETOPTS:
3068         case IP_PKTINFO:
3069         case IP_MTU_DISCOVER:
3070         case IP_RECVERR:
3071         case IP_RECVTTL:
3072         case IP_RECVTOS:
3073 #ifdef IP_FREEBIND
3074         case IP_FREEBIND:
3075 #endif
3076         case IP_MULTICAST_TTL:
3077         case IP_MULTICAST_LOOP:
3078             val = 0;
3079             if (optlen >= sizeof(uint32_t)) {
3080                 if (get_user_u32(val, optval_addr))
3081                     return -TARGET_EFAULT;
3082             } else if (optlen >= 1) {
3083                 if (get_user_u8(val, optval_addr))
3084                     return -TARGET_EFAULT;
3085             }
3086             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
3087             break;
3088         case IP_ADD_MEMBERSHIP:
3089         case IP_DROP_MEMBERSHIP:
3090             if (optlen < sizeof (struct target_ip_mreq) ||
3091                 optlen > sizeof (struct target_ip_mreqn))
3092                 return -TARGET_EINVAL;
3093 
3094             ip_mreq = (struct ip_mreqn *) alloca(optlen);
3095             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
3096             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
3097             break;
3098 
3099         case IP_BLOCK_SOURCE:
3100         case IP_UNBLOCK_SOURCE:
3101         case IP_ADD_SOURCE_MEMBERSHIP:
3102         case IP_DROP_SOURCE_MEMBERSHIP:
3103             if (optlen != sizeof (struct target_ip_mreq_source))
3104                 return -TARGET_EINVAL;
3105 
3106             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
3107             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3108             unlock_user(ip_mreq_source, optval_addr, 0);
3109             break;
3110 
3111         default:
3112             goto unimplemented;
3113         }
3114         break;
3115     case SOL_IPV6:
3116         switch (optname) {
3117         case IPV6_MTU_DISCOVER:
3118         case IPV6_MTU:
3119         case IPV6_V6ONLY:
3120         case IPV6_RECVPKTINFO:
3121         case IPV6_UNICAST_HOPS:
3122         case IPV6_MULTICAST_HOPS:
3123         case IPV6_MULTICAST_LOOP:
3124         case IPV6_RECVERR:
3125         case IPV6_RECVHOPLIMIT:
3126         case IPV6_2292HOPLIMIT:
3127         case IPV6_CHECKSUM:
3128             val = 0;
3129             if (optlen < sizeof(uint32_t)) {
3130                 return -TARGET_EINVAL;
3131             }
3132             if (get_user_u32(val, optval_addr)) {
3133                 return -TARGET_EFAULT;
3134             }
3135             ret = get_errno(setsockopt(sockfd, level, optname,
3136                                        &val, sizeof(val)));
3137             break;
3138         case IPV6_PKTINFO:
3139         {
3140             struct in6_pktinfo pki;
3141 
3142             if (optlen < sizeof(pki)) {
3143                 return -TARGET_EINVAL;
3144             }
3145 
3146             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3147                 return -TARGET_EFAULT;
3148             }
3149 
3150             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3151 
3152             ret = get_errno(setsockopt(sockfd, level, optname,
3153                                        &pki, sizeof(pki)));
3154             break;
3155         }
3156         default:
3157             goto unimplemented;
3158         }
3159         break;
3160     case SOL_ICMPV6:
3161         switch (optname) {
3162         case ICMPV6_FILTER:
3163         {
3164             struct icmp6_filter icmp6f;
3165 
3166             if (optlen > sizeof(icmp6f)) {
3167                 optlen = sizeof(icmp6f);
3168             }
3169 
3170             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3171                 return -TARGET_EFAULT;
3172             }
3173 
3174             for (val = 0; val < 8; val++) {
3175                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3176             }
3177 
3178             ret = get_errno(setsockopt(sockfd, level, optname,
3179                                        &icmp6f, optlen));
3180             break;
3181         }
3182         default:
3183             goto unimplemented;
3184         }
3185         break;
3186     case SOL_RAW:
3187         switch (optname) {
3188         case ICMP_FILTER:
3189         case IPV6_CHECKSUM:
3190             /* these take a u32 value */
3191             if (optlen < sizeof(uint32_t)) {
3192                 return -TARGET_EINVAL;
3193             }
3194 
3195             if (get_user_u32(val, optval_addr)) {
3196                 return -TARGET_EFAULT;
3197             }
3198             ret = get_errno(setsockopt(sockfd, level, optname,
3199                                        &val, sizeof(val)));
3200             break;
3201 
3202         default:
3203             goto unimplemented;
3204         }
3205         break;
3206     case TARGET_SOL_SOCKET:
3207         switch (optname) {
3208         case TARGET_SO_RCVTIMEO:
3209         {
3210                 struct timeval tv;
3211 
3212                 optname = SO_RCVTIMEO;
3213 
3214 set_timeout:
3215                 if (optlen != sizeof(struct target_timeval)) {
3216                     return -TARGET_EINVAL;
3217                 }
3218 
3219                 if (copy_from_user_timeval(&tv, optval_addr)) {
3220                     return -TARGET_EFAULT;
3221                 }
3222 
3223                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3224                                 &tv, sizeof(tv)));
3225                 return ret;
3226         }
3227         case TARGET_SO_SNDTIMEO:
3228                 optname = SO_SNDTIMEO;
3229                 goto set_timeout;
3230         case TARGET_SO_ATTACH_FILTER:
3231         {
3232                 struct target_sock_fprog *tfprog;
3233                 struct target_sock_filter *tfilter;
3234                 struct sock_fprog fprog;
3235                 struct sock_filter *filter;
3236                 int i;
3237 
3238                 if (optlen != sizeof(*tfprog)) {
3239                     return -TARGET_EINVAL;
3240                 }
3241                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3242                     return -TARGET_EFAULT;
3243                 }
3244                 if (!lock_user_struct(VERIFY_READ, tfilter,
3245                                       tswapal(tfprog->filter), 0)) {
3246                     unlock_user_struct(tfprog, optval_addr, 1);
3247                     return -TARGET_EFAULT;
3248                 }
3249 
3250                 fprog.len = tswap16(tfprog->len);
3251                 filter = g_try_new(struct sock_filter, fprog.len);
3252                 if (filter == NULL) {
3253                     unlock_user_struct(tfilter, tfprog->filter, 1);
3254                     unlock_user_struct(tfprog, optval_addr, 1);
3255                     return -TARGET_ENOMEM;
3256                 }
3257                 for (i = 0; i < fprog.len; i++) {
3258                     filter[i].code = tswap16(tfilter[i].code);
3259                     filter[i].jt = tfilter[i].jt;
3260                     filter[i].jf = tfilter[i].jf;
3261                     filter[i].k = tswap32(tfilter[i].k);
3262                 }
3263                 fprog.filter = filter;
3264 
3265                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3266                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3267                 g_free(filter);
3268 
3269                 unlock_user_struct(tfilter, tfprog->filter, 1);
3270                 unlock_user_struct(tfprog, optval_addr, 1);
3271                 return ret;
3272         }
3273 	case TARGET_SO_BINDTODEVICE:
3274 	{
3275 		char *dev_ifname, *addr_ifname;
3276 
3277 		if (optlen > IFNAMSIZ - 1) {
3278 		    optlen = IFNAMSIZ - 1;
3279 		}
3280 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3281 		if (!dev_ifname) {
3282 		    return -TARGET_EFAULT;
3283 		}
3284 		optname = SO_BINDTODEVICE;
3285 		addr_ifname = alloca(IFNAMSIZ);
3286 		memcpy(addr_ifname, dev_ifname, optlen);
3287 		addr_ifname[optlen] = 0;
3288 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3289                                            addr_ifname, optlen));
3290 		unlock_user (dev_ifname, optval_addr, 0);
3291 		return ret;
3292 	}
3293             /* Options with 'int' argument.  */
3294         case TARGET_SO_DEBUG:
3295 		optname = SO_DEBUG;
3296 		break;
3297         case TARGET_SO_REUSEADDR:
3298 		optname = SO_REUSEADDR;
3299 		break;
3300         case TARGET_SO_TYPE:
3301 		optname = SO_TYPE;
3302 		break;
3303         case TARGET_SO_ERROR:
3304 		optname = SO_ERROR;
3305 		break;
3306         case TARGET_SO_DONTROUTE:
3307 		optname = SO_DONTROUTE;
3308 		break;
3309         case TARGET_SO_BROADCAST:
3310 		optname = SO_BROADCAST;
3311 		break;
3312         case TARGET_SO_SNDBUF:
3313 		optname = SO_SNDBUF;
3314 		break;
3315         case TARGET_SO_SNDBUFFORCE:
3316                 optname = SO_SNDBUFFORCE;
3317                 break;
3318         case TARGET_SO_RCVBUF:
3319 		optname = SO_RCVBUF;
3320 		break;
3321         case TARGET_SO_RCVBUFFORCE:
3322                 optname = SO_RCVBUFFORCE;
3323                 break;
3324         case TARGET_SO_KEEPALIVE:
3325 		optname = SO_KEEPALIVE;
3326 		break;
3327         case TARGET_SO_OOBINLINE:
3328 		optname = SO_OOBINLINE;
3329 		break;
3330         case TARGET_SO_NO_CHECK:
3331 		optname = SO_NO_CHECK;
3332 		break;
3333         case TARGET_SO_PRIORITY:
3334 		optname = SO_PRIORITY;
3335 		break;
3336 #ifdef SO_BSDCOMPAT
3337         case TARGET_SO_BSDCOMPAT:
3338 		optname = SO_BSDCOMPAT;
3339 		break;
3340 #endif
3341         case TARGET_SO_PASSCRED:
3342 		optname = SO_PASSCRED;
3343 		break;
3344         case TARGET_SO_PASSSEC:
3345                 optname = SO_PASSSEC;
3346                 break;
3347         case TARGET_SO_TIMESTAMP:
3348 		optname = SO_TIMESTAMP;
3349 		break;
3350         case TARGET_SO_RCVLOWAT:
3351 		optname = SO_RCVLOWAT;
3352 		break;
3353         default:
3354             goto unimplemented;
3355         }
3356 	if (optlen < sizeof(uint32_t))
3357             return -TARGET_EINVAL;
3358 
3359 	if (get_user_u32(val, optval_addr))
3360             return -TARGET_EFAULT;
3361 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3362         break;
3363     default:
3364     unimplemented:
3365         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3366         ret = -TARGET_ENOPROTOOPT;
3367     }
3368     return ret;
3369 }
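/* Note on the translation above: the TARGET_SOL_SOCKET and TARGET_SO_*
 * constants do not necessarily match the host's values, which is why each
 * option is mapped explicitly rather than passed straight through.  As an
 * illustrative example, SOL_SOCKET is 1 on most Linux architectures but
 * 0xffff on MIPS, so a MIPS guest's setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
 * &one, sizeof(one)) must reach the host as a call using the host's own
 * SOL_SOCKET and SO_REUSEADDR values.
 */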
3370 
3371 /* do_getsockopt() must return target values and target errnos. */
3372 static abi_long do_getsockopt(int sockfd, int level, int optname,
3373                               abi_ulong optval_addr, abi_ulong optlen)
3374 {
3375     abi_long ret;
3376     int len, val;
3377     socklen_t lv;
3378 
3379     switch(level) {
3380     case TARGET_SOL_SOCKET:
3381         level = SOL_SOCKET;
3382         switch (optname) {
3383         /* These don't just return a single integer */
3384         case TARGET_SO_LINGER:
3385         case TARGET_SO_RCVTIMEO:
3386         case TARGET_SO_SNDTIMEO:
3387         case TARGET_SO_PEERNAME:
3388             goto unimplemented;
3389         case TARGET_SO_PEERCRED: {
3390             struct ucred cr;
3391             socklen_t crlen;
3392             struct target_ucred *tcr;
3393 
3394             if (get_user_u32(len, optlen)) {
3395                 return -TARGET_EFAULT;
3396             }
3397             if (len < 0) {
3398                 return -TARGET_EINVAL;
3399             }
3400 
3401             crlen = sizeof(cr);
3402             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3403                                        &cr, &crlen));
3404             if (ret < 0) {
3405                 return ret;
3406             }
3407             if (len > crlen) {
3408                 len = crlen;
3409             }
3410             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3411                 return -TARGET_EFAULT;
3412             }
3413             __put_user(cr.pid, &tcr->pid);
3414             __put_user(cr.uid, &tcr->uid);
3415             __put_user(cr.gid, &tcr->gid);
3416             unlock_user_struct(tcr, optval_addr, 1);
3417             if (put_user_u32(len, optlen)) {
3418                 return -TARGET_EFAULT;
3419             }
3420             break;
3421         }
3422         /* Options with 'int' argument.  */
3423         case TARGET_SO_DEBUG:
3424             optname = SO_DEBUG;
3425             goto int_case;
3426         case TARGET_SO_REUSEADDR:
3427             optname = SO_REUSEADDR;
3428             goto int_case;
3429         case TARGET_SO_TYPE:
3430             optname = SO_TYPE;
3431             goto int_case;
3432         case TARGET_SO_ERROR:
3433             optname = SO_ERROR;
3434             goto int_case;
3435         case TARGET_SO_DONTROUTE:
3436             optname = SO_DONTROUTE;
3437             goto int_case;
3438         case TARGET_SO_BROADCAST:
3439             optname = SO_BROADCAST;
3440             goto int_case;
3441         case TARGET_SO_SNDBUF:
3442             optname = SO_SNDBUF;
3443             goto int_case;
3444         case TARGET_SO_RCVBUF:
3445             optname = SO_RCVBUF;
3446             goto int_case;
3447         case TARGET_SO_KEEPALIVE:
3448             optname = SO_KEEPALIVE;
3449             goto int_case;
3450         case TARGET_SO_OOBINLINE:
3451             optname = SO_OOBINLINE;
3452             goto int_case;
3453         case TARGET_SO_NO_CHECK:
3454             optname = SO_NO_CHECK;
3455             goto int_case;
3456         case TARGET_SO_PRIORITY:
3457             optname = SO_PRIORITY;
3458             goto int_case;
3459 #ifdef SO_BSDCOMPAT
3460         case TARGET_SO_BSDCOMPAT:
3461             optname = SO_BSDCOMPAT;
3462             goto int_case;
3463 #endif
3464         case TARGET_SO_PASSCRED:
3465             optname = SO_PASSCRED;
3466             goto int_case;
3467         case TARGET_SO_TIMESTAMP:
3468             optname = SO_TIMESTAMP;
3469             goto int_case;
3470         case TARGET_SO_RCVLOWAT:
3471             optname = SO_RCVLOWAT;
3472             goto int_case;
3473         case TARGET_SO_ACCEPTCONN:
3474             optname = SO_ACCEPTCONN;
3475             goto int_case;
3476         default:
3477             goto int_case;
3478         }
3479         break;
3480     case SOL_TCP:
3481         /* TCP options all take an 'int' value.  */
3482     int_case:
3483         if (get_user_u32(len, optlen))
3484             return -TARGET_EFAULT;
3485         if (len < 0)
3486             return -TARGET_EINVAL;
3487         lv = sizeof(lv);
3488         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3489         if (ret < 0)
3490             return ret;
3491         if (optname == SO_TYPE) {
3492             val = host_to_target_sock_type(val);
3493         }
3494         if (len > lv)
3495             len = lv;
3496         if (len == 4) {
3497             if (put_user_u32(val, optval_addr))
3498                 return -TARGET_EFAULT;
3499         } else {
3500             if (put_user_u8(val, optval_addr))
3501                 return -TARGET_EFAULT;
3502         }
3503         if (put_user_u32(len, optlen))
3504             return -TARGET_EFAULT;
3505         break;
3506     case SOL_IP:
3507         switch(optname) {
3508         case IP_TOS:
3509         case IP_TTL:
3510         case IP_HDRINCL:
3511         case IP_ROUTER_ALERT:
3512         case IP_RECVOPTS:
3513         case IP_RETOPTS:
3514         case IP_PKTINFO:
3515         case IP_MTU_DISCOVER:
3516         case IP_RECVERR:
3517         case IP_RECVTOS:
3518 #ifdef IP_FREEBIND
3519         case IP_FREEBIND:
3520 #endif
3521         case IP_MULTICAST_TTL:
3522         case IP_MULTICAST_LOOP:
3523             if (get_user_u32(len, optlen))
3524                 return -TARGET_EFAULT;
3525             if (len < 0)
3526                 return -TARGET_EINVAL;
3527             lv = sizeof(lv);
3528             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3529             if (ret < 0)
3530                 return ret;
3531             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3532                 len = 1;
3533                 if (put_user_u32(len, optlen)
3534                     || put_user_u8(val, optval_addr))
3535                     return -TARGET_EFAULT;
3536             } else {
3537                 if (len > sizeof(int))
3538                     len = sizeof(int);
3539                 if (put_user_u32(len, optlen)
3540                     || put_user_u32(val, optval_addr))
3541                     return -TARGET_EFAULT;
3542             }
3543             break;
3544         default:
3545             ret = -TARGET_ENOPROTOOPT;
3546             break;
3547         }
3548         break;
3549     default:
3550     unimplemented:
3551         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3552                  level, optname);
3553         ret = -TARGET_EOPNOTSUPP;
3554         break;
3555     }
3556     return ret;
3557 }
3558 
3559 /* Convert target low/high pair representing file offset into the host
3560  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3561  * as the kernel doesn't handle them either.
3562  */
3563 static void target_to_host_low_high(abi_ulong tlow,
3564                                     abi_ulong thigh,
3565                                     unsigned long *hlow,
3566                                     unsigned long *hhigh)
3567 {
3568     uint64_t off = tlow |
3569         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3570         TARGET_LONG_BITS / 2;
3571 
3572     *hlow = off;
3573     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3574 }
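/* Worked example (illustrative): a 32-bit guest on a 64-bit host passing the
 * offset 0x123456789 supplies tlow = 0x23456789 and thigh = 0x1.  With
 * TARGET_LONG_BITS == 32 the two half-width shifts give
 *
 *     off    = 0x23456789 | ((0x1ULL << 16) << 16) = 0x123456789
 *     *hlow  = 0x123456789
 *     *hhigh = (off >> 32) >> 32 = 0
 *
 * The shifts are split in half so that no single shift ever equals the
 * operand width (which would be undefined behaviour in C), mirroring how the
 * kernel itself recombines the two halves.
 */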
3575 
3576 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3577                                 abi_ulong count, int copy)
3578 {
3579     struct target_iovec *target_vec;
3580     struct iovec *vec;
3581     abi_ulong total_len, max_len;
3582     int i;
3583     int err = 0;
3584     bool bad_address = false;
3585 
3586     if (count == 0) {
3587         errno = 0;
3588         return NULL;
3589     }
3590     if (count > IOV_MAX) {
3591         errno = EINVAL;
3592         return NULL;
3593     }
3594 
3595     vec = g_try_new0(struct iovec, count);
3596     if (vec == NULL) {
3597         errno = ENOMEM;
3598         return NULL;
3599     }
3600 
3601     target_vec = lock_user(VERIFY_READ, target_addr,
3602                            count * sizeof(struct target_iovec), 1);
3603     if (target_vec == NULL) {
3604         err = EFAULT;
3605         goto fail2;
3606     }
3607 
3608     /* ??? If host page size > target page size, this will result in a
3609        value larger than what we can actually support.  */
3610     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3611     total_len = 0;
3612 
3613     for (i = 0; i < count; i++) {
3614         abi_ulong base = tswapal(target_vec[i].iov_base);
3615         abi_long len = tswapal(target_vec[i].iov_len);
3616 
3617         if (len < 0) {
3618             err = EINVAL;
3619             goto fail;
3620         } else if (len == 0) {
3621             /* Zero length pointer is ignored.  */
3622             vec[i].iov_base = 0;
3623         } else {
3624             vec[i].iov_base = lock_user(type, base, len, copy);
3625             /* If the first buffer pointer is bad, this is a fault.  But
3626              * subsequent bad buffers will result in a partial write; this
3627              * is realized by filling the vector with null pointers and
3628              * zero lengths. */
3629             if (!vec[i].iov_base) {
3630                 if (i == 0) {
3631                     err = EFAULT;
3632                     goto fail;
3633                 } else {
3634                     bad_address = true;
3635                 }
3636             }
3637             if (bad_address) {
3638                 len = 0;
3639             }
3640             if (len > max_len - total_len) {
3641                 len = max_len - total_len;
3642             }
3643         }
3644         vec[i].iov_len = len;
3645         total_len += len;
3646     }
3647 
3648     unlock_user(target_vec, target_addr, 0);
3649     return vec;
3650 
3651  fail:
3652     while (--i >= 0) {
3653         if (tswapal(target_vec[i].iov_len) > 0) {
3654             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3655         }
3656     }
3657     unlock_user(target_vec, target_addr, 0);
3658  fail2:
3659     g_free(vec);
3660     errno = err;
3661     return NULL;
3662 }
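/* Example of the partial-write behaviour above (illustrative): for a writev()
 * with three target iovecs where the second one points at an unmapped
 * address, the vector handed to the host ends up as
 *
 *     vec[0] = { host_ptr0, len0 }   locked normally
 *     vec[1] = { NULL, 0 }           bad address, length forced to zero
 *     vec[2] = { host_ptr2, 0 }      still locked, but length forced to zero
 *
 * so the host call performs a short write of len0 bytes, matching the partial
 * transfer Linux would report.  Only a fault on the very first buffer makes
 * the whole call fail with EFAULT.
 */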
3663 
3664 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3665                          abi_ulong count, int copy)
3666 {
3667     struct target_iovec *target_vec;
3668     int i;
3669 
3670     target_vec = lock_user(VERIFY_READ, target_addr,
3671                            count * sizeof(struct target_iovec), 1);
3672     if (target_vec) {
3673         for (i = 0; i < count; i++) {
3674             abi_ulong base = tswapal(target_vec[i].iov_base);
3675             abi_long len = tswapal(target_vec[i].iov_len);
3676             if (len < 0) {
3677                 break;
3678             }
3679             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3680         }
3681         unlock_user(target_vec, target_addr, 0);
3682     }
3683 
3684     g_free(vec);
3685 }
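/* Typical calling pattern (a minimal sketch; the sendmsg/recvmsg path below
 * uses the same shape with safe_sendmsg()/safe_recvmsg()):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_iov, count, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(writev(fd, vec, count));
 *     unlock_iovec(vec, target_iov, count, 0);
 *
 * The final argument of unlock_iovec() controls whether the locked buffers
 * are copied back to guest memory: 0 after sending guest data to the host,
 * non-zero after the host has filled the buffers for the guest to read.
 */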
3686 
3687 static inline int target_to_host_sock_type(int *type)
3688 {
3689     int host_type = 0;
3690     int target_type = *type;
3691 
3692     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3693     case TARGET_SOCK_DGRAM:
3694         host_type = SOCK_DGRAM;
3695         break;
3696     case TARGET_SOCK_STREAM:
3697         host_type = SOCK_STREAM;
3698         break;
3699     default:
3700         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3701         break;
3702     }
3703     if (target_type & TARGET_SOCK_CLOEXEC) {
3704 #if defined(SOCK_CLOEXEC)
3705         host_type |= SOCK_CLOEXEC;
3706 #else
3707         return -TARGET_EINVAL;
3708 #endif
3709     }
3710     if (target_type & TARGET_SOCK_NONBLOCK) {
3711 #if defined(SOCK_NONBLOCK)
3712         host_type |= SOCK_NONBLOCK;
3713 #elif !defined(O_NONBLOCK)
3714         return -TARGET_EINVAL;
3715 #endif
3716     }
3717     *type = host_type;
3718     return 0;
3719 }
3720 
3721 /* Try to emulate socket type flags after socket creation.  */
3722 static int sock_flags_fixup(int fd, int target_type)
3723 {
3724 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3725     if (target_type & TARGET_SOCK_NONBLOCK) {
3726         int flags = fcntl(fd, F_GETFL);
3727         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3728             close(fd);
3729             return -TARGET_EINVAL;
3730         }
3731     }
3732 #endif
3733     return fd;
3734 }
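/* Example (illustrative): a guest socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC |
 * SOCK_NONBLOCK, 0) reaches target_to_host_sock_type() above, which picks the
 * base type via TARGET_SOCK_TYPE_MASK and translates the two flag bits
 * separately, since their numeric values are architecture specific on the
 * target side.  If the host has no SOCK_NONBLOCK, the flag is instead applied
 * by sock_flags_fixup() using fcntl(F_SETFL, O_NONBLOCK) once the socket
 * exists.
 */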
3735 
3736 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3737                                                abi_ulong target_addr,
3738                                                socklen_t len)
3739 {
3740     struct sockaddr *addr = host_addr;
3741     struct target_sockaddr *target_saddr;
3742 
3743     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3744     if (!target_saddr) {
3745         return -TARGET_EFAULT;
3746     }
3747 
3748     memcpy(addr, target_saddr, len);
3749     addr->sa_family = tswap16(target_saddr->sa_family);
3750     /* spkt_protocol is big-endian */
3751 
3752     unlock_user(target_saddr, target_addr, 0);
3753     return 0;
3754 }
3755 
3756 static TargetFdTrans target_packet_trans = {
3757     .target_to_host_addr = packet_target_to_host_sockaddr,
3758 };
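/* The TargetFdTrans hooks registered through fd_trans_register() are keyed by
 * file descriptor: do_socket() below registers target_packet_trans for
 * SOCK_PACKET sockets and the netlink translators for PF_NETLINK sockets, and
 * the I/O paths consult the registered callback before touching the data, as
 * do_sendto() does (sketch of that pattern):
 *
 *     if (fd_trans_target_to_host_data(fd)) {
 *         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
 *     }
 *
 * This keeps protocol-specific byte swapping out of the generic socket code.
 */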
3759 
3760 #ifdef CONFIG_RTNETLINK
3761 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3762 {
3763     abi_long ret;
3764 
3765     ret = target_to_host_nlmsg_route(buf, len);
3766     if (ret < 0) {
3767         return ret;
3768     }
3769 
3770     return len;
3771 }
3772 
3773 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3774 {
3775     abi_long ret;
3776 
3777     ret = host_to_target_nlmsg_route(buf, len);
3778     if (ret < 0) {
3779         return ret;
3780     }
3781 
3782     return len;
3783 }
3784 
3785 static TargetFdTrans target_netlink_route_trans = {
3786     .target_to_host_data = netlink_route_target_to_host,
3787     .host_to_target_data = netlink_route_host_to_target,
3788 };
3789 #endif /* CONFIG_RTNETLINK */
3790 
3791 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3792 {
3793     abi_long ret;
3794 
3795     ret = target_to_host_nlmsg_audit(buf, len);
3796     if (ret < 0) {
3797         return ret;
3798     }
3799 
3800     return len;
3801 }
3802 
3803 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3804 {
3805     abi_long ret;
3806 
3807     ret = host_to_target_nlmsg_audit(buf, len);
3808     if (ret < 0) {
3809         return ret;
3810     }
3811 
3812     return len;
3813 }
3814 
3815 static TargetFdTrans target_netlink_audit_trans = {
3816     .target_to_host_data = netlink_audit_target_to_host,
3817     .host_to_target_data = netlink_audit_host_to_target,
3818 };
3819 
3820 /* do_socket() must return target values and target errnos. */
3821 static abi_long do_socket(int domain, int type, int protocol)
3822 {
3823     int target_type = type;
3824     int ret;
3825 
3826     ret = target_to_host_sock_type(&type);
3827     if (ret) {
3828         return ret;
3829     }
3830 
3831     if (domain == PF_NETLINK && !(
3832 #ifdef CONFIG_RTNETLINK
3833          protocol == NETLINK_ROUTE ||
3834 #endif
3835          protocol == NETLINK_KOBJECT_UEVENT ||
3836          protocol == NETLINK_AUDIT)) {
3837         return -EPFNOSUPPORT;
3838     }
3839 
3840     if (domain == AF_PACKET ||
3841         (domain == AF_INET && type == SOCK_PACKET)) {
3842         protocol = tswap16(protocol);
3843     }
3844 
3845     ret = get_errno(socket(domain, type, protocol));
3846     if (ret >= 0) {
3847         ret = sock_flags_fixup(ret, target_type);
3848         if (type == SOCK_PACKET) {
3849             /* Handle the obsolete SOCK_PACKET case:
3850              * such sockets are bound by device name.
3851              */
3852             fd_trans_register(ret, &target_packet_trans);
3853         } else if (domain == PF_NETLINK) {
3854             switch (protocol) {
3855 #ifdef CONFIG_RTNETLINK
3856             case NETLINK_ROUTE:
3857                 fd_trans_register(ret, &target_netlink_route_trans);
3858                 break;
3859 #endif
3860             case NETLINK_KOBJECT_UEVENT:
3861                 /* nothing to do: messages are strings */
3862                 break;
3863             case NETLINK_AUDIT:
3864                 fd_trans_register(ret, &target_netlink_audit_trans);
3865                 break;
3866             default:
3867                 g_assert_not_reached();
3868             }
3869         }
3870     }
3871     return ret;
3872 }
3873 
3874 /* do_bind() must return target values and target errnos. */
3875 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3876                         socklen_t addrlen)
3877 {
3878     void *addr;
3879     abi_long ret;
3880 
3881     if ((int)addrlen < 0) {
3882         return -TARGET_EINVAL;
3883     }
3884 
3885     addr = alloca(addrlen+1);
3886 
3887     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3888     if (ret)
3889         return ret;
3890 
3891     return get_errno(bind(sockfd, addr, addrlen));
3892 }
3893 
3894 /* do_connect() must return target values and target errnos. */
3895 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3896                            socklen_t addrlen)
3897 {
3898     void *addr;
3899     abi_long ret;
3900 
3901     if ((int)addrlen < 0) {
3902         return -TARGET_EINVAL;
3903     }
3904 
3905     addr = alloca(addrlen+1);
3906 
3907     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3908     if (ret)
3909         return ret;
3910 
3911     return get_errno(safe_connect(sockfd, addr, addrlen));
3912 }
3913 
3914 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3915 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3916                                       int flags, int send)
3917 {
3918     abi_long ret, len;
3919     struct msghdr msg;
3920     abi_ulong count;
3921     struct iovec *vec;
3922     abi_ulong target_vec;
3923 
3924     if (msgp->msg_name) {
3925         msg.msg_namelen = tswap32(msgp->msg_namelen);
3926         msg.msg_name = alloca(msg.msg_namelen+1);
3927         ret = target_to_host_sockaddr(fd, msg.msg_name,
3928                                       tswapal(msgp->msg_name),
3929                                       msg.msg_namelen);
3930         if (ret == -TARGET_EFAULT) {
3931             /* For connected sockets msg_name and msg_namelen must
3932              * be ignored, so returning EFAULT immediately is wrong.
3933              * Instead, pass a bad msg_name to the host kernel, and
3934              * let it decide whether to return EFAULT or not.
3935              */
3936             msg.msg_name = (void *)-1;
3937         } else if (ret) {
3938             goto out2;
3939         }
3940     } else {
3941         msg.msg_name = NULL;
3942         msg.msg_namelen = 0;
3943     }
3944     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3945     msg.msg_control = alloca(msg.msg_controllen);
3946     memset(msg.msg_control, 0, msg.msg_controllen);
3947 
3948     msg.msg_flags = tswap32(msgp->msg_flags);
3949 
3950     count = tswapal(msgp->msg_iovlen);
3951     target_vec = tswapal(msgp->msg_iov);
3952 
3953     if (count > IOV_MAX) {
3954         /* sendmsg/recvmsg return a different errno for this condition than
3955          * readv/writev do, so we must catch it here before lock_iovec() does.
3956          */
3957         ret = -TARGET_EMSGSIZE;
3958         goto out2;
3959     }
3960 
3961     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3962                      target_vec, count, send);
3963     if (vec == NULL) {
3964         ret = -host_to_target_errno(errno);
3965         goto out2;
3966     }
3967     msg.msg_iovlen = count;
3968     msg.msg_iov = vec;
3969 
3970     if (send) {
3971         if (fd_trans_target_to_host_data(fd)) {
3972             void *host_msg;
3973 
3974             host_msg = g_malloc(msg.msg_iov->iov_len);
3975             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3976             ret = fd_trans_target_to_host_data(fd)(host_msg,
3977                                                    msg.msg_iov->iov_len);
3978             if (ret >= 0) {
3979                 msg.msg_iov->iov_base = host_msg;
3980                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3981             }
3982             g_free(host_msg);
3983         } else {
3984             ret = target_to_host_cmsg(&msg, msgp);
3985             if (ret == 0) {
3986                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3987             }
3988         }
3989     } else {
3990         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3991         if (!is_error(ret)) {
3992             len = ret;
3993             if (fd_trans_host_to_target_data(fd)) {
3994                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3995                                                MIN(msg.msg_iov->iov_len, len));
3996             } else {
3997                 ret = host_to_target_cmsg(msgp, &msg);
3998             }
3999             if (!is_error(ret)) {
4000                 msgp->msg_namelen = tswap32(msg.msg_namelen);
4001                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
4002                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
4003                                     msg.msg_name, msg.msg_namelen);
4004                     if (ret) {
4005                         goto out;
4006                     }
4007                 }
4008 
4009                 ret = len;
4010             }
4011         }
4012     }
4013 
4014 out:
4015     unlock_iovec(vec, target_vec, count, !send);
4016 out2:
4017     return ret;
4018 }
4019 
4020 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
4021                                int flags, int send)
4022 {
4023     abi_long ret;
4024     struct target_msghdr *msgp;
4025 
4026     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
4027                           msgp,
4028                           target_msg,
4029                           send ? 1 : 0)) {
4030         return -TARGET_EFAULT;
4031     }
4032     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
4033     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
4034     return ret;
4035 }
4036 
4037 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
4038  * so it might not have this *mmsg-specific flag either.
4039  */
4040 #ifndef MSG_WAITFORONE
4041 #define MSG_WAITFORONE 0x10000
4042 #endif
4043 
4044 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
4045                                 unsigned int vlen, unsigned int flags,
4046                                 int send)
4047 {
4048     struct target_mmsghdr *mmsgp;
4049     abi_long ret = 0;
4050     int i;
4051 
4052     if (vlen > UIO_MAXIOV) {
4053         vlen = UIO_MAXIOV;
4054     }
4055 
4056     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
4057     if (!mmsgp) {
4058         return -TARGET_EFAULT;
4059     }
4060 
4061     for (i = 0; i < vlen; i++) {
4062         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
4063         if (is_error(ret)) {
4064             break;
4065         }
4066         mmsgp[i].msg_len = tswap32(ret);
4067         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
4068         if (flags & MSG_WAITFORONE) {
4069             flags |= MSG_DONTWAIT;
4070         }
4071     }
4072 
4073     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
4074 
4075     /* Return the number of datagrams sent or received if we transferred
4076      * any at all; otherwise return the error.
4077      */
4078     if (i) {
4079         return i;
4080     }
4081     return ret;
4082 }
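/* Illustrative trace: recvmmsg(fd, msgvec, 3, MSG_WAITFORONE) on a blocking
 * socket waits in the first do_sendrecvmsg_locked() call until one datagram
 * arrives; the loop then ORs in MSG_DONTWAIT, so if nothing else is queued
 * the second call fails with EAGAIN, the loop breaks with i == 1, and the
 * guest gets 1 (the number of datagrams received) rather than the error.
 */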
4083 
4084 /* do_accept4() must return target values and target errnos. */
4085 static abi_long do_accept4(int fd, abi_ulong target_addr,
4086                            abi_ulong target_addrlen_addr, int flags)
4087 {
4088     socklen_t addrlen;
4089     void *addr;
4090     abi_long ret;
4091     int host_flags;
4092 
4093     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
4094 
4095     if (target_addr == 0) {
4096         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
4097     }
4098 
4099     /* Linux returns EINVAL if the addrlen pointer is invalid */
4100     if (get_user_u32(addrlen, target_addrlen_addr))
4101         return -TARGET_EINVAL;
4102 
4103     if ((int)addrlen < 0) {
4104         return -TARGET_EINVAL;
4105     }
4106 
4107     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4108         return -TARGET_EINVAL;
4109 
4110     addr = alloca(addrlen);
4111 
4112     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4113     if (!is_error(ret)) {
4114         host_to_target_sockaddr(target_addr, addr, addrlen);
4115         if (put_user_u32(addrlen, target_addrlen_addr))
4116             ret = -TARGET_EFAULT;
4117     }
4118     return ret;
4119 }
4120 
4121 /* do_getpeername() must return target values and target errnos. */
4122 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4123                                abi_ulong target_addrlen_addr)
4124 {
4125     socklen_t addrlen;
4126     void *addr;
4127     abi_long ret;
4128 
4129     if (get_user_u32(addrlen, target_addrlen_addr))
4130         return -TARGET_EFAULT;
4131 
4132     if ((int)addrlen < 0) {
4133         return -TARGET_EINVAL;
4134     }
4135 
4136     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4137         return -TARGET_EFAULT;
4138 
4139     addr = alloca(addrlen);
4140 
4141     ret = get_errno(getpeername(fd, addr, &addrlen));
4142     if (!is_error(ret)) {
4143         host_to_target_sockaddr(target_addr, addr, addrlen);
4144         if (put_user_u32(addrlen, target_addrlen_addr))
4145             ret = -TARGET_EFAULT;
4146     }
4147     return ret;
4148 }
4149 
4150 /* do_getsockname() must return target values and target errnos. */
4151 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4152                                abi_ulong target_addrlen_addr)
4153 {
4154     socklen_t addrlen;
4155     void *addr;
4156     abi_long ret;
4157 
4158     if (get_user_u32(addrlen, target_addrlen_addr))
4159         return -TARGET_EFAULT;
4160 
4161     if ((int)addrlen < 0) {
4162         return -TARGET_EINVAL;
4163     }
4164 
4165     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4166         return -TARGET_EFAULT;
4167 
4168     addr = alloca(addrlen);
4169 
4170     ret = get_errno(getsockname(fd, addr, &addrlen));
4171     if (!is_error(ret)) {
4172         host_to_target_sockaddr(target_addr, addr, addrlen);
4173         if (put_user_u32(addrlen, target_addrlen_addr))
4174             ret = -TARGET_EFAULT;
4175     }
4176     return ret;
4177 }
4178 
4179 /* do_socketpair() must return target values and target errnos. */
4180 static abi_long do_socketpair(int domain, int type, int protocol,
4181                               abi_ulong target_tab_addr)
4182 {
4183     int tab[2];
4184     abi_long ret;
4185 
4186     target_to_host_sock_type(&type);
4187 
4188     ret = get_errno(socketpair(domain, type, protocol, tab));
4189     if (!is_error(ret)) {
4190         if (put_user_s32(tab[0], target_tab_addr)
4191             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4192             ret = -TARGET_EFAULT;
4193     }
4194     return ret;
4195 }
4196 
4197 /* do_sendto() must return target values and target errnos. */
4198 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4199                           abi_ulong target_addr, socklen_t addrlen)
4200 {
4201     void *addr;
4202     void *host_msg;
4203     void *copy_msg = NULL;
4204     abi_long ret;
4205 
4206     if ((int)addrlen < 0) {
4207         return -TARGET_EINVAL;
4208     }
4209 
4210     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4211     if (!host_msg)
4212         return -TARGET_EFAULT;
4213     if (fd_trans_target_to_host_data(fd)) {
4214         copy_msg = host_msg;
4215         host_msg = g_malloc(len);
4216         memcpy(host_msg, copy_msg, len);
4217         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4218         if (ret < 0) {
4219             goto fail;
4220         }
4221     }
4222     if (target_addr) {
4223         addr = alloca(addrlen+1);
4224         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4225         if (ret) {
4226             goto fail;
4227         }
4228         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4229     } else {
4230         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4231     }
4232 fail:
4233     if (copy_msg) {
4234         g_free(host_msg);
4235         host_msg = copy_msg;
4236     }
4237     unlock_user(host_msg, msg, 0);
4238     return ret;
4239 }
4240 
4241 /* do_recvfrom() must return target values and target errnos. */
4242 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4243                             abi_ulong target_addr,
4244                             abi_ulong target_addrlen)
4245 {
4246     socklen_t addrlen;
4247     void *addr;
4248     void *host_msg;
4249     abi_long ret;
4250 
4251     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4252     if (!host_msg)
4253         return -TARGET_EFAULT;
4254     if (target_addr) {
4255         if (get_user_u32(addrlen, target_addrlen)) {
4256             ret = -TARGET_EFAULT;
4257             goto fail;
4258         }
4259         if ((int)addrlen < 0) {
4260             ret = -TARGET_EINVAL;
4261             goto fail;
4262         }
4263         addr = alloca(addrlen);
4264         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4265                                       addr, &addrlen));
4266     } else {
4267         addr = NULL; /* To keep compiler quiet.  */
4268         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4269     }
4270     if (!is_error(ret)) {
4271         if (fd_trans_host_to_target_data(fd)) {
4272             abi_long trans;
4273             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
4274             if (is_error(trans)) {
4275                 ret = trans;
4276                 goto fail;
4277             }
4278         }
4279         if (target_addr) {
4280             host_to_target_sockaddr(target_addr, addr, addrlen);
4281             if (put_user_u32(addrlen, target_addrlen)) {
4282                 ret = -TARGET_EFAULT;
4283                 goto fail;
4284             }
4285         }
4286         unlock_user(host_msg, msg, len);
4287     } else {
4288 fail:
4289         unlock_user(host_msg, msg, 0);
4290     }
4291     return ret;
4292 }
4293 
4294 #ifdef TARGET_NR_socketcall
4295 /* do_socketcall() must return target values and target errnos. */
4296 static abi_long do_socketcall(int num, abi_ulong vptr)
4297 {
4298     static const unsigned nargs[] = { /* number of arguments per operation */
4299         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4300         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4301         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4302         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4303         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4304         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4305         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4306         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4307         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4308         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4309         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4310         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4311         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4312         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4313         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4314         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4315         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4316         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4317         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4318         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4319     };
4320     abi_long a[6]; /* max 6 args */
4321     unsigned i;
4322 
4323     /* check the range of the first argument num */
4324     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4325     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4326         return -TARGET_EINVAL;
4327     }
4328     /* ensure we have space for args */
4329     if (nargs[num] > ARRAY_SIZE(a)) {
4330         return -TARGET_EINVAL;
4331     }
4332     /* collect the arguments in a[] according to nargs[] */
4333     for (i = 0; i < nargs[num]; ++i) {
4334         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4335             return -TARGET_EFAULT;
4336         }
4337     }
4338     /* now that we have the args, invoke the appropriate underlying function */
4339     switch (num) {
4340     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4341         return do_socket(a[0], a[1], a[2]);
4342     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4343         return do_bind(a[0], a[1], a[2]);
4344     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4345         return do_connect(a[0], a[1], a[2]);
4346     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4347         return get_errno(listen(a[0], a[1]));
4348     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4349         return do_accept4(a[0], a[1], a[2], 0);
4350     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4351         return do_getsockname(a[0], a[1], a[2]);
4352     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4353         return do_getpeername(a[0], a[1], a[2]);
4354     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4355         return do_socketpair(a[0], a[1], a[2], a[3]);
4356     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4357         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4358     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4359         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4360     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4361         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4362     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4363         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4364     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4365         return get_errno(shutdown(a[0], a[1]));
4366     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4367         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4368     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4369         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4370     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4371         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4372     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4373         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4374     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4375         return do_accept4(a[0], a[1], a[2], a[3]);
4376     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4377         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4378     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4379         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4380     default:
4381         gemu_log("Unsupported socketcall: %d\n", num);
4382         return -TARGET_EINVAL;
4383     }
4384 }
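/* Example (illustrative): on targets that multiplex the socket API through
 * socketcall, such as 32-bit x86, a guest connect(fd, addr, addrlen) arrives
 * here as socketcall(TARGET_SYS_CONNECT, vptr) with vptr pointing at three
 * abi_longs in guest memory.  The loop above fetches a[0..2] with
 * get_user_ual() and the switch dispatches to do_connect(a[0], a[1], a[2]),
 * so the do_*() helpers never need to care which entry path was used.
 */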
4385 #endif
4386 
4387 #define N_SHM_REGIONS	32
4388 
4389 static struct shm_region {
4390     abi_ulong start;
4391     abi_ulong size;
4392     bool in_use;
4393 } shm_regions[N_SHM_REGIONS];
4394 
4395 #ifndef TARGET_SEMID64_DS
4396 /* asm-generic version of this struct */
4397 struct target_semid64_ds
4398 {
4399   struct target_ipc_perm sem_perm;
4400   abi_ulong sem_otime;
4401 #if TARGET_ABI_BITS == 32
4402   abi_ulong __unused1;
4403 #endif
4404   abi_ulong sem_ctime;
4405 #if TARGET_ABI_BITS == 32
4406   abi_ulong __unused2;
4407 #endif
4408   abi_ulong sem_nsems;
4409   abi_ulong __unused3;
4410   abi_ulong __unused4;
4411 };
4412 #endif
4413 
4414 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4415                                                abi_ulong target_addr)
4416 {
4417     struct target_ipc_perm *target_ip;
4418     struct target_semid64_ds *target_sd;
4419 
4420     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4421         return -TARGET_EFAULT;
4422     target_ip = &(target_sd->sem_perm);
4423     host_ip->__key = tswap32(target_ip->__key);
4424     host_ip->uid = tswap32(target_ip->uid);
4425     host_ip->gid = tswap32(target_ip->gid);
4426     host_ip->cuid = tswap32(target_ip->cuid);
4427     host_ip->cgid = tswap32(target_ip->cgid);
4428 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4429     host_ip->mode = tswap32(target_ip->mode);
4430 #else
4431     host_ip->mode = tswap16(target_ip->mode);
4432 #endif
4433 #if defined(TARGET_PPC)
4434     host_ip->__seq = tswap32(target_ip->__seq);
4435 #else
4436     host_ip->__seq = tswap16(target_ip->__seq);
4437 #endif
4438     unlock_user_struct(target_sd, target_addr, 0);
4439     return 0;
4440 }
4441 
4442 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4443                                                struct ipc_perm *host_ip)
4444 {
4445     struct target_ipc_perm *target_ip;
4446     struct target_semid64_ds *target_sd;
4447 
4448     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     target_ip = &(target_sd->sem_perm);
4451     target_ip->__key = tswap32(host_ip->__key);
4452     target_ip->uid = tswap32(host_ip->uid);
4453     target_ip->gid = tswap32(host_ip->gid);
4454     target_ip->cuid = tswap32(host_ip->cuid);
4455     target_ip->cgid = tswap32(host_ip->cgid);
4456 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4457     target_ip->mode = tswap32(host_ip->mode);
4458 #else
4459     target_ip->mode = tswap16(host_ip->mode);
4460 #endif
4461 #if defined(TARGET_PPC)
4462     target_ip->__seq = tswap32(host_ip->__seq);
4463 #else
4464     target_ip->__seq = tswap16(host_ip->__seq);
4465 #endif
4466     unlock_user_struct(target_sd, target_addr, 1);
4467     return 0;
4468 }
4469 
4470 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4471                                                abi_ulong target_addr)
4472 {
4473     struct target_semid64_ds *target_sd;
4474 
4475     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4476         return -TARGET_EFAULT;
4477     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4478         return -TARGET_EFAULT;
4479     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4480     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4481     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4482     unlock_user_struct(target_sd, target_addr, 0);
4483     return 0;
4484 }
4485 
4486 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4487                                                struct semid_ds *host_sd)
4488 {
4489     struct target_semid64_ds *target_sd;
4490 
4491     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4492         return -TARGET_EFAULT;
4493     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4494         return -TARGET_EFAULT;
4495     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4496     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4497     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4498     unlock_user_struct(target_sd, target_addr, 1);
4499     return 0;
4500 }
4501 
4502 struct target_seminfo {
4503     int semmap;
4504     int semmni;
4505     int semmns;
4506     int semmnu;
4507     int semmsl;
4508     int semopm;
4509     int semume;
4510     int semusz;
4511     int semvmx;
4512     int semaem;
4513 };
4514 
4515 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4516                                               struct seminfo *host_seminfo)
4517 {
4518     struct target_seminfo *target_seminfo;
4519     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4520         return -TARGET_EFAULT;
4521     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4522     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4523     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4524     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4525     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4526     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4527     __put_user(host_seminfo->semume, &target_seminfo->semume);
4528     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4529     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4530     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4531     unlock_user_struct(target_seminfo, target_addr, 1);
4532     return 0;
4533 }
4534 
4535 union semun {
4536 	int val;
4537 	struct semid_ds *buf;
4538 	unsigned short *array;
4539 	struct seminfo *__buf;
4540 };
4541 
4542 union target_semun {
4543 	int val;
4544 	abi_ulong buf;
4545 	abi_ulong array;
4546 	abi_ulong __buf;
4547 };
4548 
4549 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4550                                                abi_ulong target_addr)
4551 {
4552     int nsems;
4553     unsigned short *array;
4554     union semun semun;
4555     struct semid_ds semid_ds;
4556     int i, ret;
4557 
4558     semun.buf = &semid_ds;
4559 
4560     ret = semctl(semid, 0, IPC_STAT, semun);
4561     if (ret == -1)
4562         return get_errno(ret);
4563 
4564     nsems = semid_ds.sem_nsems;
4565 
4566     *host_array = g_try_new(unsigned short, nsems);
4567     if (!*host_array) {
4568         return -TARGET_ENOMEM;
4569     }
4570     array = lock_user(VERIFY_READ, target_addr,
4571                       nsems*sizeof(unsigned short), 1);
4572     if (!array) {
4573         g_free(*host_array);
4574         return -TARGET_EFAULT;
4575     }
4576 
4577     for(i=0; i<nsems; i++) {
4578         __get_user((*host_array)[i], &array[i]);
4579     }
4580     unlock_user(array, target_addr, 0);
4581 
4582     return 0;
4583 }
4584 
4585 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4586                                                unsigned short **host_array)
4587 {
4588     int nsems;
4589     unsigned short *array;
4590     union semun semun;
4591     struct semid_ds semid_ds;
4592     int i, ret;
4593 
4594     semun.buf = &semid_ds;
4595 
4596     ret = semctl(semid, 0, IPC_STAT, semun);
4597     if (ret == -1)
4598         return get_errno(ret);
4599 
4600     nsems = semid_ds.sem_nsems;
4601 
4602     array = lock_user(VERIFY_WRITE, target_addr,
4603                       nsems*sizeof(unsigned short), 0);
4604     if (!array)
4605         return -TARGET_EFAULT;
4606 
4607     for(i=0; i<nsems; i++) {
4608         __put_user((*host_array)[i], &array[i]);
4609     }
4610     g_free(*host_array);
4611     unlock_user(array, target_addr, 1);
4612 
4613     return 0;
4614 }
4615 
4616 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4617                                  abi_ulong target_arg)
4618 {
4619     union target_semun target_su = { .buf = target_arg };
4620     union semun arg;
4621     struct semid_ds dsarg;
4622     unsigned short *array = NULL;
4623     struct seminfo seminfo;
4624     abi_long ret = -TARGET_EINVAL;
4625     abi_long err;
4626     cmd &= 0xff;
4627 
4628     switch (cmd) {
4629     case GETVAL:
4630     case SETVAL:
4631         /* In 64 bit cross-endian situations, we will erroneously pick up
4632          * the wrong half of the union for the "val" element.  To rectify
4633          * this, the entire 8-byte structure is byteswapped, followed by
4634          * a swap of the 4 byte val field. In other cases, the data is
4635          * already in proper host byte order. */
4636         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4637             target_su.buf = tswapal(target_su.buf);
4638             arg.val = tswap32(target_su.val);
4639         } else {
4640             arg.val = target_su.val;
4641         }
4642         ret = get_errno(semctl(semid, semnum, cmd, arg));
4643         break;
4644     case GETALL:
4645     case SETALL:
4646         err = target_to_host_semarray(semid, &array, target_su.array);
4647         if (err)
4648             return err;
4649         arg.array = array;
4650         ret = get_errno(semctl(semid, semnum, cmd, arg));
4651         err = host_to_target_semarray(semid, target_su.array, &array);
4652         if (err)
4653             return err;
4654         break;
4655     case IPC_STAT:
4656     case IPC_SET:
4657     case SEM_STAT:
4658         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4659         if (err)
4660             return err;
4661         arg.buf = &dsarg;
4662         ret = get_errno(semctl(semid, semnum, cmd, arg));
4663         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4664         if (err)
4665             return err;
4666         break;
4667     case IPC_INFO:
4668     case SEM_INFO:
4669         arg.__buf = &seminfo;
4670         ret = get_errno(semctl(semid, semnum, cmd, arg));
4671         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4672         if (err)
4673             return err;
4674         break;
4675     case IPC_RMID:
4676     case GETPID:
4677     case GETNCNT:
4678     case GETZCNT:
4679         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4680         break;
4681     }
4682 
4683     return ret;
4684 }
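
/* Illustrative note (editor's sketch, not from the original source): the
 * GETVAL/SETVAL handling above copes with the guest storing a 32-bit "val"
 * inside an 8-byte semun union on 64-bit targets.  When guest and host
 * endianness differ, the ABI-width load sees those four bytes in the
 * opposite half of the union, so the code swaps the whole abi_ulong with
 * tswapal() to put them back where a 32-bit read finds them and then
 * applies tswap32() to restore their byte order.  When the union is only
 * four bytes wide, val is already where the host expects it.
 */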
4685 
4686 struct target_sembuf {
4687     unsigned short sem_num;
4688     short sem_op;
4689     short sem_flg;
4690 };
4691 
4692 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4693                                              abi_ulong target_addr,
4694                                              unsigned nsops)
4695 {
4696     struct target_sembuf *target_sembuf;
4697     int i;
4698 
4699     target_sembuf = lock_user(VERIFY_READ, target_addr,
4700                               nsops*sizeof(struct target_sembuf), 1);
4701     if (!target_sembuf)
4702         return -TARGET_EFAULT;
4703 
4704     for(i=0; i<nsops; i++) {
4705         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4706         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4707         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4708     }
4709 
4710     unlock_user(target_sembuf, target_addr, 0);
4711 
4712     return 0;
4713 }
4714 
4715 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4716 {
4717     struct sembuf sops[nsops];
4718 
4719     if (target_to_host_sembuf(sops, ptr, nsops))
4720         return -TARGET_EFAULT;
4721 
4722     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4723 }
4724 
4725 struct target_msqid_ds
4726 {
4727     struct target_ipc_perm msg_perm;
4728     abi_ulong msg_stime;
4729 #if TARGET_ABI_BITS == 32
4730     abi_ulong __unused1;
4731 #endif
4732     abi_ulong msg_rtime;
4733 #if TARGET_ABI_BITS == 32
4734     abi_ulong __unused2;
4735 #endif
4736     abi_ulong msg_ctime;
4737 #if TARGET_ABI_BITS == 32
4738     abi_ulong __unused3;
4739 #endif
4740     abi_ulong __msg_cbytes;
4741     abi_ulong msg_qnum;
4742     abi_ulong msg_qbytes;
4743     abi_ulong msg_lspid;
4744     abi_ulong msg_lrpid;
4745     abi_ulong __unused4;
4746     abi_ulong __unused5;
4747 };
4748 
4749 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4750                                                abi_ulong target_addr)
4751 {
4752     struct target_msqid_ds *target_md;
4753 
4754     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4755         return -TARGET_EFAULT;
4756     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4757         return -TARGET_EFAULT;
4758     host_md->msg_stime = tswapal(target_md->msg_stime);
4759     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4760     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4761     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4762     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4763     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4764     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4765     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4766     unlock_user_struct(target_md, target_addr, 0);
4767     return 0;
4768 }
4769 
4770 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4771                                                struct msqid_ds *host_md)
4772 {
4773     struct target_msqid_ds *target_md;
4774 
4775     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4776         return -TARGET_EFAULT;
4777     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4778         return -TARGET_EFAULT;
4779     target_md->msg_stime = tswapal(host_md->msg_stime);
4780     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4781     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4782     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4783     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4784     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4785     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4786     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4787     unlock_user_struct(target_md, target_addr, 1);
4788     return 0;
4789 }
4790 
4791 struct target_msginfo {
4792     int msgpool;
4793     int msgmap;
4794     int msgmax;
4795     int msgmnb;
4796     int msgmni;
4797     int msgssz;
4798     int msgtql;
4799     unsigned short int msgseg;
4800 };
4801 
4802 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4803                                               struct msginfo *host_msginfo)
4804 {
4805     struct target_msginfo *target_msginfo;
4806     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4807         return -TARGET_EFAULT;
4808     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4809     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4810     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4811     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4812     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4813     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4814     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4815     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4816     unlock_user_struct(target_msginfo, target_addr, 1);
4817     return 0;
4818 }
4819 
4820 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4821 {
4822     struct msqid_ds dsarg;
4823     struct msginfo msginfo;
4824     abi_long ret = -TARGET_EINVAL;
4825 
4826     cmd &= 0xff;
4827 
4828     switch (cmd) {
4829     case IPC_STAT:
4830     case IPC_SET:
4831     case MSG_STAT:
4832         if (target_to_host_msqid_ds(&dsarg,ptr))
4833             return -TARGET_EFAULT;
4834         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4835         if (host_to_target_msqid_ds(ptr,&dsarg))
4836             return -TARGET_EFAULT;
4837         break;
4838     case IPC_RMID:
4839         ret = get_errno(msgctl(msgid, cmd, NULL));
4840         break;
4841     case IPC_INFO:
4842     case MSG_INFO:
4843         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4844         if (host_to_target_msginfo(ptr, &msginfo))
4845             return -TARGET_EFAULT;
4846         break;
4847     }
4848 
4849     return ret;
4850 }
4851 
4852 struct target_msgbuf {
4853     abi_long mtype;
4854     char	mtext[1];
4855 };
4856 
4857 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4858                                  ssize_t msgsz, int msgflg)
4859 {
4860     struct target_msgbuf *target_mb;
4861     struct msgbuf *host_mb;
4862     abi_long ret = 0;
4863 
4864     if (msgsz < 0) {
4865         return -TARGET_EINVAL;
4866     }
4867 
4868     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4869         return -TARGET_EFAULT;
4870     host_mb = g_try_malloc(msgsz + sizeof(long));
4871     if (!host_mb) {
4872         unlock_user_struct(target_mb, msgp, 0);
4873         return -TARGET_ENOMEM;
4874     }
4875     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4876     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4877     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4878     g_free(host_mb);
4879     unlock_user_struct(target_mb, msgp, 0);
4880 
4881     return ret;
4882 }
4883 
4884 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4885                                  ssize_t msgsz, abi_long msgtyp,
4886                                  int msgflg)
4887 {
4888     struct target_msgbuf *target_mb;
4889     char *target_mtext;
4890     struct msgbuf *host_mb;
4891     abi_long ret = 0;
4892 
4893     if (msgsz < 0) {
4894         return -TARGET_EINVAL;
4895     }
4896 
4897     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4898         return -TARGET_EFAULT;
4899 
4900     host_mb = g_try_malloc(msgsz + sizeof(long));
4901     if (!host_mb) {
4902         ret = -TARGET_ENOMEM;
4903         goto end;
4904     }
4905     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4906 
4907     if (ret > 0) {
4908         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4909         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4910         if (!target_mtext) {
4911             ret = -TARGET_EFAULT;
4912             goto end;
4913         }
4914         memcpy(target_mb->mtext, host_mb->mtext, ret);
4915         unlock_user(target_mtext, target_mtext_addr, ret);
4916     }
4917 
4918     target_mb->mtype = tswapal(host_mb->mtype);
4919 
4920 end:
4921     if (target_mb)
4922         unlock_user_struct(target_mb, msgp, 1);
4923     g_free(host_mb);
4924     return ret;
4925 }
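
/* Illustrative note: struct target_msgbuf follows the classic msgbuf layout,
 * an abi_long mtype immediately followed by the mtext[] payload.  That is
 * why do_msgrcv() above computes the guest payload address as
 * msgp + sizeof(abi_ulong): the text starts one ABI word past the start of
 * the buffer, directly after mtype.
 */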
4926 
4927 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4928                                                abi_ulong target_addr)
4929 {
4930     struct target_shmid_ds *target_sd;
4931 
4932     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4933         return -TARGET_EFAULT;
4934     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4935         return -TARGET_EFAULT;
4936     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4937     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4938     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4939     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4940     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4941     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4942     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4943     unlock_user_struct(target_sd, target_addr, 0);
4944     return 0;
4945 }
4946 
4947 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4948                                                struct shmid_ds *host_sd)
4949 {
4950     struct target_shmid_ds *target_sd;
4951 
4952     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4953         return -TARGET_EFAULT;
4954     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4955         return -TARGET_EFAULT;
4956     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4957     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4958     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4959     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4960     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4961     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4962     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4963     unlock_user_struct(target_sd, target_addr, 1);
4964     return 0;
4965 }
4966 
4967 struct  target_shminfo {
4968     abi_ulong shmmax;
4969     abi_ulong shmmin;
4970     abi_ulong shmmni;
4971     abi_ulong shmseg;
4972     abi_ulong shmall;
4973 };
4974 
4975 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4976                                               struct shminfo *host_shminfo)
4977 {
4978     struct target_shminfo *target_shminfo;
4979     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4980         return -TARGET_EFAULT;
4981     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4982     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4983     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4984     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4985     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4986     unlock_user_struct(target_shminfo, target_addr, 1);
4987     return 0;
4988 }
4989 
4990 struct target_shm_info {
4991     int used_ids;
4992     abi_ulong shm_tot;
4993     abi_ulong shm_rss;
4994     abi_ulong shm_swp;
4995     abi_ulong swap_attempts;
4996     abi_ulong swap_successes;
4997 };
4998 
4999 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
5000                                                struct shm_info *host_shm_info)
5001 {
5002     struct target_shm_info *target_shm_info;
5003     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
5004         return -TARGET_EFAULT;
5005     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
5006     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
5007     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
5008     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
5009     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
5010     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
5011     unlock_user_struct(target_shm_info, target_addr, 1);
5012     return 0;
5013 }
5014 
5015 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
5016 {
5017     struct shmid_ds dsarg;
5018     struct shminfo shminfo;
5019     struct shm_info shm_info;
5020     abi_long ret = -TARGET_EINVAL;
5021 
5022     cmd &= 0xff;
5023 
5024     switch(cmd) {
5025     case IPC_STAT:
5026     case IPC_SET:
5027     case SHM_STAT:
5028         if (target_to_host_shmid_ds(&dsarg, buf))
5029             return -TARGET_EFAULT;
5030         ret = get_errno(shmctl(shmid, cmd, &dsarg));
5031         if (host_to_target_shmid_ds(buf, &dsarg))
5032             return -TARGET_EFAULT;
5033         break;
5034     case IPC_INFO:
5035         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
5036         if (host_to_target_shminfo(buf, &shminfo))
5037             return -TARGET_EFAULT;
5038         break;
5039     case SHM_INFO:
5040         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
5041         if (host_to_target_shm_info(buf, &shm_info))
5042             return -TARGET_EFAULT;
5043         break;
5044     case IPC_RMID:
5045     case SHM_LOCK:
5046     case SHM_UNLOCK:
5047         ret = get_errno(shmctl(shmid, cmd, NULL));
5048         break;
5049     }
5050 
5051     return ret;
5052 }
5053 
5054 #ifndef TARGET_FORCE_SHMLBA
5055 /* For most architectures, SHMLBA is the same as the page size;
5056  * some architectures have larger values, in which case they should
5057  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
5058  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
5059  * and defining its own value for SHMLBA.
5060  *
5061  * The kernel also permits SHMLBA to be set by the architecture to a
5062  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
5063  * this means that addresses are rounded to the large size if
5064  * SHM_RND is set but addresses not aligned to that size are not rejected
5065  * as long as they are at least page-aligned. Since the only architecture
5066  * which uses this is ia64 this code doesn't provide for that oddity.
5067  */
5068 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
5069 {
5070     return TARGET_PAGE_SIZE;
5071 }
5072 #endif
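
/* Purely illustrative sketch (not part of the original source): an
 * architecture whose SHMLBA exceeds the page size would define
 * TARGET_FORCE_SHMLBA in its target headers and provide its own
 * target_shmlba(), along the lines of
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 *
 * where the 4-page SHMLBA is hypothetical.  The hook takes cpu_env so that
 * such an override can consult CPU state if the alignment requirement
 * depends on it.
 */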
5073 
5074 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
5075                                  int shmid, abi_ulong shmaddr, int shmflg)
5076 {
5077     abi_long raddr;
5078     void *host_raddr;
5079     struct shmid_ds shm_info;
5080     int i,ret;
5081     abi_ulong shmlba;
5082 
5083     /* find out the length of the shared memory segment */
5084     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
5085     if (is_error(ret)) {
5086         /* can't get length, bail out */
5087         return ret;
5088     }
5089 
5090     shmlba = target_shmlba(cpu_env);
5091 
5092     if (shmaddr & (shmlba - 1)) {
5093         if (shmflg & SHM_RND) {
5094             shmaddr &= ~(shmlba - 1);
5095         } else {
5096             return -TARGET_EINVAL;
5097         }
5098     }
5099     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
5100         return -TARGET_EINVAL;
5101     }
5102 
5103     mmap_lock();
5104 
5105     if (shmaddr)
5106         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5107     else {
5108         abi_ulong mmap_start;
5109 
5110         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5111 
5112         if (mmap_start == -1) {
5113             errno = ENOMEM;
5114             host_raddr = (void *)-1;
5115         } else
5116             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5117     }
5118 
5119     if (host_raddr == (void *)-1) {
5120         mmap_unlock();
5121         return get_errno((long)host_raddr);
5122     }
5123     raddr=h2g((unsigned long)host_raddr);
5124 
5125     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5126                    PAGE_VALID | PAGE_READ |
5127                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5128 
5129     for (i = 0; i < N_SHM_REGIONS; i++) {
5130         if (!shm_regions[i].in_use) {
5131             shm_regions[i].in_use = true;
5132             shm_regions[i].start = raddr;
5133             shm_regions[i].size = shm_info.shm_segsz;
5134             break;
5135         }
5136     }
5137 
5138     mmap_unlock();
5139     return raddr;
5140 
5141 }
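
/* Worked example for the SHM_RND handling in do_shmat() above (illustrative):
 * with a 4096-byte shmlba, shmlba - 1 is 0xfff, so a request for shmaddr
 * 0x12345 with SHM_RND set is rounded down to 0x12000 by
 * shmaddr &= ~(shmlba - 1), while the same misaligned address without
 * SHM_RND is rejected with -TARGET_EINVAL, mirroring the kernel's shmat()
 * behaviour.
 */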
5142 
5143 static inline abi_long do_shmdt(abi_ulong shmaddr)
5144 {
5145     int i;
5146     abi_long rv;
5147 
5148     mmap_lock();
5149 
5150     for (i = 0; i < N_SHM_REGIONS; ++i) {
5151         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5152             shm_regions[i].in_use = false;
5153             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5154             break;
5155         }
5156     }
5157     rv = get_errno(shmdt(g2h(shmaddr)));
5158 
5159     mmap_unlock();
5160 
5161     return rv;
5162 }
5163 
5164 #ifdef TARGET_NR_ipc
5165 /* ??? This only works with linear mappings.  */
5166 /* do_ipc() must return target values and target errnos. */
5167 static abi_long do_ipc(CPUArchState *cpu_env,
5168                        unsigned int call, abi_long first,
5169                        abi_long second, abi_long third,
5170                        abi_long ptr, abi_long fifth)
5171 {
5172     int version;
5173     abi_long ret = 0;
5174 
5175     version = call >> 16;
5176     call &= 0xffff;
5177 
5178     switch (call) {
5179     case IPCOP_semop:
5180         ret = do_semop(first, ptr, second);
5181         break;
5182 
5183     case IPCOP_semget:
5184         ret = get_errno(semget(first, second, third));
5185         break;
5186 
5187     case IPCOP_semctl: {
5188         /* The semun argument to semctl is passed by value, so dereference the
5189          * ptr argument. */
5190         abi_ulong atptr;
5191         get_user_ual(atptr, ptr);
5192         ret = do_semctl(first, second, third, atptr);
5193         break;
5194     }
5195 
5196     case IPCOP_msgget:
5197         ret = get_errno(msgget(first, second));
5198         break;
5199 
5200     case IPCOP_msgsnd:
5201         ret = do_msgsnd(first, ptr, second, third);
5202         break;
5203 
5204     case IPCOP_msgctl:
5205         ret = do_msgctl(first, second, ptr);
5206         break;
5207 
5208     case IPCOP_msgrcv:
5209         switch (version) {
5210         case 0:
5211             {
5212                 struct target_ipc_kludge {
5213                     abi_long msgp;
5214                     abi_long msgtyp;
5215                 } *tmp;
5216 
5217                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5218                     ret = -TARGET_EFAULT;
5219                     break;
5220                 }
5221 
5222                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5223 
5224                 unlock_user_struct(tmp, ptr, 0);
5225                 break;
5226             }
5227         default:
5228             ret = do_msgrcv(first, ptr, second, fifth, third);
5229         }
5230         break;
5231 
5232     case IPCOP_shmat:
5233         switch (version) {
5234         default:
5235         {
5236             abi_ulong raddr;
5237             raddr = do_shmat(cpu_env, first, ptr, second);
5238             if (is_error(raddr))
5239                 return get_errno(raddr);
5240             if (put_user_ual(raddr, third))
5241                 return -TARGET_EFAULT;
5242             break;
5243         }
5244         case 1:
5245             ret = -TARGET_EINVAL;
5246             break;
5247         }
5248         break;
5249     case IPCOP_shmdt:
5250         ret = do_shmdt(ptr);
5251         break;
5252 
5253     case IPCOP_shmget:
5254         /* IPC_* flag values are the same on all linux platforms */
5255         ret = get_errno(shmget(first, second, third));
5256         break;
5257 
5258         /* IPC_* and SHM_* command values are the same on all linux platforms */
5259     case IPCOP_shmctl:
5260         ret = do_shmctl(first, second, ptr);
5261         break;
5262     default:
5263         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5264         ret = -TARGET_ENOSYS;
5265         break;
5266     }
5267     return ret;
5268 }
5269 #endif
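
/* Illustrative note on the ipc(2) multiplexer above: the guest packs an IPC
 * interface version into the upper 16 bits of the call argument and the
 * operation into the lower 16 bits.  For example, a call value of
 * (1 << 16) | IPCOP_shmat selects the "version 1" shmat variant, which
 * do_ipc() rejects with -TARGET_EINVAL, whereas plain IPCOP_shmat
 * (version 0) goes through do_shmat() and stores the attach address via the
 * third argument.
 */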
5270 
5271 /* kernel structure types definitions */
5272 
5273 #define STRUCT(name, ...) STRUCT_ ## name,
5274 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5275 enum {
5276 #include "syscall_types.h"
5277 STRUCT_MAX
5278 };
5279 #undef STRUCT
5280 #undef STRUCT_SPECIAL
5281 
5282 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5283 #define STRUCT_SPECIAL(name)
5284 #include "syscall_types.h"
5285 #undef STRUCT
5286 #undef STRUCT_SPECIAL
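
/* Illustrative note: syscall_types.h is pulled in twice above as an X-macro.
 * The first pass turns every STRUCT(name, ...) and STRUCT_SPECIAL(name) line
 * into a STRUCT_name enumerator, and the second pass turns each
 * STRUCT(name, ...) into a struct_name_def[] argtype array terminated by
 * TYPE_NULL.  A hypothetical entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * would therefore yield both a STRUCT_winsize enum value and a
 * struct_winsize_def[] description usable with MK_STRUCT() and
 * thunk_convert().
 */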
5287 
5288 typedef struct IOCTLEntry IOCTLEntry;
5289 
5290 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5291                              int fd, int cmd, abi_long arg);
5292 
5293 struct IOCTLEntry {
5294     int target_cmd;
5295     unsigned int host_cmd;
5296     const char *name;
5297     int access;
5298     do_ioctl_fn *do_ioctl;
5299     const argtype arg_type[5];
5300 };
5301 
5302 #define IOC_R 0x0001
5303 #define IOC_W 0x0002
5304 #define IOC_RW (IOC_R | IOC_W)
5305 
5306 #define MAX_STRUCT_SIZE 4096
5307 
5308 #ifdef CONFIG_FIEMAP
5309 /* So fiemap access checks don't overflow on 32 bit systems.
5310  * This is very slightly smaller than the limit imposed by
5311  * the underlying kernel.
5312  */
5313 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5314                             / sizeof(struct fiemap_extent))
5315 
5316 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5317                                        int fd, int cmd, abi_long arg)
5318 {
5319     /* The parameter for this ioctl is a struct fiemap followed
5320      * by an array of struct fiemap_extent whose size is set
5321      * in fiemap->fm_extent_count. The array is filled in by the
5322      * ioctl.
5323      */
5324     int target_size_in, target_size_out;
5325     struct fiemap *fm;
5326     const argtype *arg_type = ie->arg_type;
5327     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5328     void *argptr, *p;
5329     abi_long ret;
5330     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5331     uint32_t outbufsz;
5332     int free_fm = 0;
5333 
5334     assert(arg_type[0] == TYPE_PTR);
5335     assert(ie->access == IOC_RW);
5336     arg_type++;
5337     target_size_in = thunk_type_size(arg_type, 0);
5338     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5339     if (!argptr) {
5340         return -TARGET_EFAULT;
5341     }
5342     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5343     unlock_user(argptr, arg, 0);
5344     fm = (struct fiemap *)buf_temp;
5345     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5346         return -TARGET_EINVAL;
5347     }
5348 
5349     outbufsz = sizeof (*fm) +
5350         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5351 
5352     if (outbufsz > MAX_STRUCT_SIZE) {
5353         /* We can't fit all the extents into the fixed size buffer.
5354          * Allocate one that is large enough and use it instead.
5355          */
5356         fm = g_try_malloc(outbufsz);
5357         if (!fm) {
5358             return -TARGET_ENOMEM;
5359         }
5360         memcpy(fm, buf_temp, sizeof(struct fiemap));
5361         free_fm = 1;
5362     }
5363     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5364     if (!is_error(ret)) {
5365         target_size_out = target_size_in;
5366         /* An extent_count of 0 means we were only counting the extents
5367          * so there are no structs to copy
5368          */
5369         if (fm->fm_extent_count != 0) {
5370             target_size_out += fm->fm_mapped_extents * extent_size;
5371         }
5372         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5373         if (!argptr) {
5374             ret = -TARGET_EFAULT;
5375         } else {
5376             /* Convert the struct fiemap */
5377             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5378             if (fm->fm_extent_count != 0) {
5379                 p = argptr + target_size_in;
5380                 /* ...and then all the struct fiemap_extents */
5381                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5382                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5383                                   THUNK_TARGET);
5384                     p += extent_size;
5385                 }
5386             }
5387             unlock_user(argptr, arg, target_size_out);
5388         }
5389     }
5390     if (free_fm) {
5391         g_free(fm);
5392     }
5393     return ret;
5394 }
5395 #endif
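
/* Illustrative layout for the FS_IOC_FIEMAP handling above: the guest buffer
 * is a struct fiemap header immediately followed by fm_extent_count
 * struct fiemap_extent slots,
 *
 *     [ struct fiemap ][ extent 0 ][ extent 1 ] ... [ extent N-1 ]
 *
 * Only the header is converted on the way in; on the way out the header plus
 * fm_mapped_extents extents are converted back, which is why
 * target_size_out grows by fm_mapped_extents * extent_size whenever
 * fm_extent_count is non-zero.
 */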
5396 
5397 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5398                                 int fd, int cmd, abi_long arg)
5399 {
5400     const argtype *arg_type = ie->arg_type;
5401     int target_size;
5402     void *argptr;
5403     int ret;
5404     struct ifconf *host_ifconf;
5405     uint32_t outbufsz;
5406     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5407     int target_ifreq_size;
5408     int nb_ifreq;
5409     int free_buf = 0;
5410     int i;
5411     int target_ifc_len;
5412     abi_long target_ifc_buf;
5413     int host_ifc_len;
5414     char *host_ifc_buf;
5415 
5416     assert(arg_type[0] == TYPE_PTR);
5417     assert(ie->access == IOC_RW);
5418 
5419     arg_type++;
5420     target_size = thunk_type_size(arg_type, 0);
5421 
5422     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423     if (!argptr)
5424         return -TARGET_EFAULT;
5425     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5426     unlock_user(argptr, arg, 0);
5427 
5428     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5429     target_ifc_len = host_ifconf->ifc_len;
5430     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5431 
5432     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5433     nb_ifreq = target_ifc_len / target_ifreq_size;
5434     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5435 
5436     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5437     if (outbufsz > MAX_STRUCT_SIZE) {
5438         /* We can't fit all the ifreq entries into the fixed size buffer.
5439          * Allocate one that is large enough and use it instead.
5440          */
5441         host_ifconf = malloc(outbufsz);
5442         if (!host_ifconf) {
5443             return -TARGET_ENOMEM;
5444         }
5445         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5446         free_buf = 1;
5447     }
5448     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5449 
5450     host_ifconf->ifc_len = host_ifc_len;
5451     host_ifconf->ifc_buf = host_ifc_buf;
5452 
5453     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5454     if (!is_error(ret)) {
5455         /* convert host ifc_len to target ifc_len */
5456 
5457         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5458         target_ifc_len = nb_ifreq * target_ifreq_size;
5459         host_ifconf->ifc_len = target_ifc_len;
5460 
5461         /* restore target ifc_buf */
5462 
5463         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5464 
5465         /* copy struct ifconf to target user */
5466 
5467         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5468         if (!argptr)
5469             return -TARGET_EFAULT;
5470         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5471         unlock_user(argptr, arg, target_size);
5472 
5473         /* copy ifreq[] to target user */
5474 
5475         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5476         for (i = 0; i < nb_ifreq ; i++) {
5477             thunk_convert(argptr + i * target_ifreq_size,
5478                           host_ifc_buf + i * sizeof(struct ifreq),
5479                           ifreq_arg_type, THUNK_TARGET);
5480         }
5481         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5482     }
5483 
5484     if (free_buf) {
5485         free(host_ifconf);
5486     }
5487 
5488     return ret;
5489 }
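
/* Worked example for the size conversion in do_ioctl_ifconf() above
 * (illustrative): the guest expresses ifc_len in units of the target's
 * ifreq size while the host ioctl works in units of the host's
 * struct ifreq, and the two can differ.  If the guest passes
 * ifc_len = 3 * target_ifreq_size, room for three host ifreq entries is
 * allocated, the ioctl runs, and the returned length is converted back with
 *
 *     nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
 *     target_ifc_len = nb_ifreq * target_ifreq_size;
 *
 * so the guest always sees a length in its own ifreq units.
 */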
5490 
5491 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5492                             int cmd, abi_long arg)
5493 {
5494     void *argptr;
5495     struct dm_ioctl *host_dm;
5496     abi_long guest_data;
5497     uint32_t guest_data_size;
5498     int target_size;
5499     const argtype *arg_type = ie->arg_type;
5500     abi_long ret;
5501     void *big_buf = NULL;
5502     char *host_data;
5503 
5504     arg_type++;
5505     target_size = thunk_type_size(arg_type, 0);
5506     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5507     if (!argptr) {
5508         ret = -TARGET_EFAULT;
5509         goto out;
5510     }
5511     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5512     unlock_user(argptr, arg, 0);
5513 
5514     /* buf_temp is too small, so fetch things into a bigger buffer */
5515     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5516     memcpy(big_buf, buf_temp, target_size);
5517     buf_temp = big_buf;
5518     host_dm = big_buf;
5519 
5520     guest_data = arg + host_dm->data_start;
5521     if ((guest_data - arg) < 0) {
5522         ret = -TARGET_EINVAL;
5523         goto out;
5524     }
5525     guest_data_size = host_dm->data_size - host_dm->data_start;
5526     host_data = (char*)host_dm + host_dm->data_start;
5527 
5528     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5529     if (!argptr) {
5530         ret = -TARGET_EFAULT;
5531         goto out;
5532     }
5533 
5534     switch (ie->host_cmd) {
5535     case DM_REMOVE_ALL:
5536     case DM_LIST_DEVICES:
5537     case DM_DEV_CREATE:
5538     case DM_DEV_REMOVE:
5539     case DM_DEV_SUSPEND:
5540     case DM_DEV_STATUS:
5541     case DM_DEV_WAIT:
5542     case DM_TABLE_STATUS:
5543     case DM_TABLE_CLEAR:
5544     case DM_TABLE_DEPS:
5545     case DM_LIST_VERSIONS:
5546         /* no input data */
5547         break;
5548     case DM_DEV_RENAME:
5549     case DM_DEV_SET_GEOMETRY:
5550         /* data contains only strings */
5551         memcpy(host_data, argptr, guest_data_size);
5552         break;
5553     case DM_TARGET_MSG:
5554         memcpy(host_data, argptr, guest_data_size);
5555         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5556         break;
5557     case DM_TABLE_LOAD:
5558     {
5559         void *gspec = argptr;
5560         void *cur_data = host_data;
5561         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5562         int spec_size = thunk_type_size(arg_type, 0);
5563         int i;
5564 
5565         for (i = 0; i < host_dm->target_count; i++) {
5566             struct dm_target_spec *spec = cur_data;
5567             uint32_t next;
5568             int slen;
5569 
5570             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5571             slen = strlen((char*)gspec + spec_size) + 1;
5572             next = spec->next;
5573             spec->next = sizeof(*spec) + slen;
5574             strcpy((char*)&spec[1], gspec + spec_size);
5575             gspec += next;
5576             cur_data += spec->next;
5577         }
5578         break;
5579     }
5580     default:
5581         ret = -TARGET_EINVAL;
5582         unlock_user(argptr, guest_data, 0);
5583         goto out;
5584     }
5585     unlock_user(argptr, guest_data, 0);
5586 
5587     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5588     if (!is_error(ret)) {
5589         guest_data = arg + host_dm->data_start;
5590         guest_data_size = host_dm->data_size - host_dm->data_start;
5591         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5592         switch (ie->host_cmd) {
5593         case DM_REMOVE_ALL:
5594         case DM_DEV_CREATE:
5595         case DM_DEV_REMOVE:
5596         case DM_DEV_RENAME:
5597         case DM_DEV_SUSPEND:
5598         case DM_DEV_STATUS:
5599         case DM_TABLE_LOAD:
5600         case DM_TABLE_CLEAR:
5601         case DM_TARGET_MSG:
5602         case DM_DEV_SET_GEOMETRY:
5603             /* no return data */
5604             break;
5605         case DM_LIST_DEVICES:
5606         {
5607             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5608             uint32_t remaining_data = guest_data_size;
5609             void *cur_data = argptr;
5610             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5611             int nl_size = 12; /* can't use thunk_size due to alignment */
5612 
5613             while (1) {
5614                 uint32_t next = nl->next;
5615                 if (next) {
5616                     nl->next = nl_size + (strlen(nl->name) + 1);
5617                 }
5618                 if (remaining_data < nl->next) {
5619                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5620                     break;
5621                 }
5622                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5623                 strcpy(cur_data + nl_size, nl->name);
5624                 cur_data += nl->next;
5625                 remaining_data -= nl->next;
5626                 if (!next) {
5627                     break;
5628                 }
5629                 nl = (void*)nl + next;
5630             }
5631             break;
5632         }
5633         case DM_DEV_WAIT:
5634         case DM_TABLE_STATUS:
5635         {
5636             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5637             void *cur_data = argptr;
5638             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5639             int spec_size = thunk_type_size(arg_type, 0);
5640             int i;
5641 
5642             for (i = 0; i < host_dm->target_count; i++) {
5643                 uint32_t next = spec->next;
5644                 int slen = strlen((char*)&spec[1]) + 1;
5645                 spec->next = (cur_data - argptr) + spec_size + slen;
5646                 if (guest_data_size < spec->next) {
5647                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5648                     break;
5649                 }
5650                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5651                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5652                 cur_data = argptr + spec->next;
5653                 spec = (void*)host_dm + host_dm->data_start + next;
5654             }
5655             break;
5656         }
5657         case DM_TABLE_DEPS:
5658         {
5659             void *hdata = (void*)host_dm + host_dm->data_start;
5660             int count = *(uint32_t*)hdata;
5661             uint64_t *hdev = hdata + 8;
5662             uint64_t *gdev = argptr + 8;
5663             int i;
5664 
5665             *(uint32_t*)argptr = tswap32(count);
5666             for (i = 0; i < count; i++) {
5667                 *gdev = tswap64(*hdev);
5668                 gdev++;
5669                 hdev++;
5670             }
5671             break;
5672         }
5673         case DM_LIST_VERSIONS:
5674         {
5675             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5676             uint32_t remaining_data = guest_data_size;
5677             void *cur_data = argptr;
5678             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5679             int vers_size = thunk_type_size(arg_type, 0);
5680 
5681             while (1) {
5682                 uint32_t next = vers->next;
5683                 if (next) {
5684                     vers->next = vers_size + (strlen(vers->name) + 1);
5685                 }
5686                 if (remaining_data < vers->next) {
5687                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5688                     break;
5689                 }
5690                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5691                 strcpy(cur_data + vers_size, vers->name);
5692                 cur_data += vers->next;
5693                 remaining_data -= vers->next;
5694                 if (!next) {
5695                     break;
5696                 }
5697                 vers = (void*)vers + next;
5698             }
5699             break;
5700         }
5701         default:
5702             unlock_user(argptr, guest_data, 0);
5703             ret = -TARGET_EINVAL;
5704             goto out;
5705         }
5706         unlock_user(argptr, guest_data, guest_data_size);
5707 
5708         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5709         if (!argptr) {
5710             ret = -TARGET_EFAULT;
5711             goto out;
5712         }
5713         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5714         unlock_user(argptr, arg, target_size);
5715     }
5716 out:
5717     g_free(big_buf);
5718     return ret;
5719 }
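
/* Illustrative layout for the device-mapper ioctls handled above: every DM_*
 * request carries a struct dm_ioctl header whose data_start field gives the
 * offset of a variable-sized payload and whose data_size field gives the
 * total size of header plus payload:
 *
 *     [ struct dm_ioctl header | command-specific payload ]
 *     ^                        ^                          ^
 *     0                        data_start                 data_size
 *
 * do_ioctl_dm() copies the header into a buffer of data_size * 2 bytes,
 * converts or fills the payload at data_start according to the specific
 * command, and sets DM_BUFFER_FULL_FLAG when the converted payload would
 * not fit in the guest's buffer.
 */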
5720 
5721 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5722                                int cmd, abi_long arg)
5723 {
5724     void *argptr;
5725     int target_size;
5726     const argtype *arg_type = ie->arg_type;
5727     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5728     abi_long ret;
5729 
5730     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5731     struct blkpg_partition host_part;
5732 
5733     /* Read and convert blkpg */
5734     arg_type++;
5735     target_size = thunk_type_size(arg_type, 0);
5736     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5737     if (!argptr) {
5738         ret = -TARGET_EFAULT;
5739         goto out;
5740     }
5741     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5742     unlock_user(argptr, arg, 0);
5743 
5744     switch (host_blkpg->op) {
5745     case BLKPG_ADD_PARTITION:
5746     case BLKPG_DEL_PARTITION:
5747         /* payload is struct blkpg_partition */
5748         break;
5749     default:
5750         /* Unknown opcode */
5751         ret = -TARGET_EINVAL;
5752         goto out;
5753     }
5754 
5755     /* Read and convert blkpg->data */
5756     arg = (abi_long)(uintptr_t)host_blkpg->data;
5757     target_size = thunk_type_size(part_arg_type, 0);
5758     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5759     if (!argptr) {
5760         ret = -TARGET_EFAULT;
5761         goto out;
5762     }
5763     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5764     unlock_user(argptr, arg, 0);
5765 
5766     /* Swizzle the data pointer to our local copy and call! */
5767     host_blkpg->data = &host_part;
5768     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5769 
5770 out:
5771     return ret;
5772 }
5773 
5774 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5775                                 int fd, int cmd, abi_long arg)
5776 {
5777     const argtype *arg_type = ie->arg_type;
5778     const StructEntry *se;
5779     const argtype *field_types;
5780     const int *dst_offsets, *src_offsets;
5781     int target_size;
5782     void *argptr;
5783     abi_ulong *target_rt_dev_ptr;
5784     unsigned long *host_rt_dev_ptr;
5785     abi_long ret;
5786     int i;
5787 
5788     assert(ie->access == IOC_W);
5789     assert(*arg_type == TYPE_PTR);
5790     arg_type++;
5791     assert(*arg_type == TYPE_STRUCT);
5792     target_size = thunk_type_size(arg_type, 0);
5793     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5794     if (!argptr) {
5795         return -TARGET_EFAULT;
5796     }
5797     arg_type++;
5798     assert(*arg_type == (int)STRUCT_rtentry);
5799     se = struct_entries + *arg_type++;
5800     assert(se->convert[0] == NULL);
5801     /* convert struct here to be able to catch rt_dev string */
5802     field_types = se->field_types;
5803     dst_offsets = se->field_offsets[THUNK_HOST];
5804     src_offsets = se->field_offsets[THUNK_TARGET];
5805     for (i = 0; i < se->nb_fields; i++) {
5806         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5807             assert(*field_types == TYPE_PTRVOID);
5808             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5809             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5810             if (*target_rt_dev_ptr != 0) {
5811                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5812                                                   tswapal(*target_rt_dev_ptr));
5813                 if (!*host_rt_dev_ptr) {
5814                     unlock_user(argptr, arg, 0);
5815                     return -TARGET_EFAULT;
5816                 }
5817             } else {
5818                 *host_rt_dev_ptr = 0;
5819             }
5820             field_types++;
5821             continue;
5822         }
5823         field_types = thunk_convert(buf_temp + dst_offsets[i],
5824                                     argptr + src_offsets[i],
5825                                     field_types, THUNK_HOST);
5826     }
5827     unlock_user(argptr, arg, 0);
5828 
5829     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5830     if (*host_rt_dev_ptr != 0) {
5831         unlock_user((void *)*host_rt_dev_ptr,
5832                     *target_rt_dev_ptr, 0);
5833     }
5834     return ret;
5835 }
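
/* Illustrative note on do_ioctl_rt() above: struct rtentry is converted
 * field by field rather than with a single thunk_convert() call because its
 * rt_dev member is a pointer to a device-name string in guest memory.  The
 * loop walks the StructEntry field offsets, and when it reaches the field
 * whose host offset matches offsetof(struct rtentry, rt_dev) it locks the
 * guest string with lock_user_string() and stores the resulting host
 * pointer; every other field goes through the normal thunk conversion.
 */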
5836 
5837 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5838                                      int fd, int cmd, abi_long arg)
5839 {
5840     int sig = target_to_host_signal(arg);
5841     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5842 }
5843 
5844 #ifdef TIOCGPTPEER
5845 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5846                                      int fd, int cmd, abi_long arg)
5847 {
5848     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5849     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5850 }
5851 #endif
5852 
5853 static IOCTLEntry ioctl_entries[] = {
5854 #define IOCTL(cmd, access, ...) \
5855     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5856 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5857     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5858 #define IOCTL_IGNORE(cmd) \
5859     { TARGET_ ## cmd, 0, #cmd },
5860 #include "ioctls.h"
5861     { 0, 0, },
5862 };
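
/* Illustrative note: each line of ioctls.h expands through the macros above
 * into one IOCTLEntry.  A hypothetical declaration such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * would expand to { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 * { MK_PTR(MK_STRUCT(STRUCT_winsize)) } }, i.e. a read-style ioctl whose
 * argument is converted by the generic thunk path in do_ioctl().
 * IOCTL_SPECIAL entries route to a hand-written do_ioctl_fn instead, and
 * IOCTL_IGNORE entries leave host_cmd as 0 so do_ioctl() returns
 * -TARGET_ENOSYS for them.
 */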
5863 
5864 /* ??? Implement proper locking for ioctls.  */
5865 /* do_ioctl() must return target values and target errnos. */
5866 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5867 {
5868     const IOCTLEntry *ie;
5869     const argtype *arg_type;
5870     abi_long ret;
5871     uint8_t buf_temp[MAX_STRUCT_SIZE];
5872     int target_size;
5873     void *argptr;
5874 
5875     ie = ioctl_entries;
5876     for(;;) {
5877         if (ie->target_cmd == 0) {
5878             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5879             return -TARGET_ENOSYS;
5880         }
5881         if (ie->target_cmd == cmd)
5882             break;
5883         ie++;
5884     }
5885     arg_type = ie->arg_type;
5886     if (ie->do_ioctl) {
5887         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5888     } else if (!ie->host_cmd) {
5889         /* Some architectures define BSD ioctls in their headers
5890            that are not implemented in Linux.  */
5891         return -TARGET_ENOSYS;
5892     }
5893 
5894     switch(arg_type[0]) {
5895     case TYPE_NULL:
5896         /* no argument */
5897         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5898         break;
5899     case TYPE_PTRVOID:
5900     case TYPE_INT:
5901         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5902         break;
5903     case TYPE_PTR:
5904         arg_type++;
5905         target_size = thunk_type_size(arg_type, 0);
5906         switch(ie->access) {
5907         case IOC_R:
5908             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5909             if (!is_error(ret)) {
5910                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5911                 if (!argptr)
5912                     return -TARGET_EFAULT;
5913                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5914                 unlock_user(argptr, arg, target_size);
5915             }
5916             break;
5917         case IOC_W:
5918             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5919             if (!argptr)
5920                 return -TARGET_EFAULT;
5921             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5922             unlock_user(argptr, arg, 0);
5923             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5924             break;
5925         default:
5926         case IOC_RW:
5927             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5928             if (!argptr)
5929                 return -TARGET_EFAULT;
5930             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5931             unlock_user(argptr, arg, 0);
5932             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5933             if (!is_error(ret)) {
5934                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5935                 if (!argptr)
5936                     return -TARGET_EFAULT;
5937                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5938                 unlock_user(argptr, arg, target_size);
5939             }
5940             break;
5941         }
5942         break;
5943     default:
5944         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5945                  (long)cmd, arg_type[0]);
5946         ret = -TARGET_ENOSYS;
5947         break;
5948     }
5949     return ret;
5950 }
5951 
5952 static const bitmask_transtbl iflag_tbl[] = {
5953         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5954         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5955         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5956         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5957         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5958         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5959         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5960         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5961         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5962         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5963         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5964         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5965         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5966         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5967         { 0, 0, 0, 0 }
5968 };
5969 
5970 static const bitmask_transtbl oflag_tbl[] = {
5971 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5972 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5973 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5974 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5975 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5976 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5977 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5978 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5979 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5980 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5981 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5982 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5983 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5984 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5985 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5986 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5987 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5988 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5989 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5990 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5991 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5992 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5993 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5994 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5995 	{ 0, 0, 0, 0 }
5996 };
5997 
5998 static const bitmask_transtbl cflag_tbl[] = {
5999 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
6000 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
6001 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
6002 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
6003 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
6004 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
6005 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
6006 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
6007 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
6008 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
6009 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
6010 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
6011 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
6012 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
6013 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
6014 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
6015 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
6016 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
6017 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
6018 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
6019 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
6020 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
6021 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
6022 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
6023 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
6024 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
6025 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
6026 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
6027 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
6028 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
6029 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
6030 	{ 0, 0, 0, 0 }
6031 };
6032 
6033 static const bitmask_transtbl lflag_tbl[] = {
6034 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
6035 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
6036 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
6037 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
6038 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
6039 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
6040 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
6041 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
6042 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
6043 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
6044 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
6045 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
6046 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
6047 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
6048 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
6049 	{ 0, 0, 0, 0 }
6050 };
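
/* Illustrative note on the four tables above: each bitmask_transtbl row is
 * { target_mask, target_bits, host_mask, host_bits }.  For single-bit flags
 * mask and value coincide, e.g. { TARGET_IXON, TARGET_IXON, IXON, IXON }
 * simply copies the IXON bit.  For multi-bit fields the mask selects the
 * field and the value selects one setting, so
 * { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 } means "if the CSIZE field of the
 * target c_cflag equals CS7, set CS7 in the host c_cflag".  The converters
 * below walk these tables in both directions via target_to_host_bitmask()
 * and host_to_target_bitmask().
 */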
6051 
6052 static void target_to_host_termios (void *dst, const void *src)
6053 {
6054     struct host_termios *host = dst;
6055     const struct target_termios *target = src;
6056 
6057     host->c_iflag =
6058         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
6059     host->c_oflag =
6060         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
6061     host->c_cflag =
6062         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
6063     host->c_lflag =
6064         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
6065     host->c_line = target->c_line;
6066 
6067     memset(host->c_cc, 0, sizeof(host->c_cc));
6068     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
6069     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
6070     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
6071     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
6072     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
6073     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
6074     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
6075     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
6076     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
6077     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
6078     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
6079     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
6080     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
6081     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
6082     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
6083     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
6084     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
6085 }
6086 
6087 static void host_to_target_termios (void *dst, const void *src)
6088 {
6089     struct target_termios *target = dst;
6090     const struct host_termios *host = src;
6091 
6092     target->c_iflag =
6093         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
6094     target->c_oflag =
6095         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
6096     target->c_cflag =
6097         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
6098     target->c_lflag =
6099         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
6100     target->c_line = host->c_line;
6101 
6102     memset(target->c_cc, 0, sizeof(target->c_cc));
6103     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6104     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6105     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6106     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6107     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6108     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6109     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6110     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6111     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6112     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6113     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6114     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6115     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6116     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6117     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6118     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6119     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6120 }
6121 
6122 static const StructEntry struct_termios_def = {
6123     .convert = { host_to_target_termios, target_to_host_termios },
6124     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6125     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6126 };
6127 
6128 static bitmask_transtbl mmap_flags_tbl[] = {
6129     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6130     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6131     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6132     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6133       MAP_ANONYMOUS, MAP_ANONYMOUS },
6134     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6135       MAP_GROWSDOWN, MAP_GROWSDOWN },
6136     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6137       MAP_DENYWRITE, MAP_DENYWRITE },
6138     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6139       MAP_EXECUTABLE, MAP_EXECUTABLE },
6140     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6141     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6142       MAP_NORESERVE, MAP_NORESERVE },
6143     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6144     /* MAP_STACK had been ignored by the kernel for quite some time.
6145        Recognize it for the target insofar as we do not want to pass
6146        it through to the host.  */
6147     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6148     { 0, 0, 0, 0 }
6149 };
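
/* Illustrative note: the MAP_STACK row above maps a target bit to a host
 * mask and value of 0.  With the bitmask_transtbl semantics this accepts the
 * flag from the guest while contributing nothing to the host flags, which is
 * exactly the "recognize but do not pass through" behaviour the comment
 * describes.
 */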
6150 
6151 #if defined(TARGET_I386)
6152 
6153 /* NOTE: there is really one LDT for all the threads */
6154 static uint8_t *ldt_table;
6155 
6156 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6157 {
6158     int size;
6159     void *p;
6160 
6161     if (!ldt_table)
6162         return 0;
6163     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6164     if (size > bytecount)
6165         size = bytecount;
6166     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6167     if (!p)
6168         return -TARGET_EFAULT;
6169     /* ??? Should this be byteswapped?  */
6170     memcpy(p, ldt_table, size);
6171     unlock_user(p, ptr, size);
6172     return size;
6173 }
6174 
6175 /* XXX: add locking support */
6176 static abi_long write_ldt(CPUX86State *env,
6177                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6178 {
6179     struct target_modify_ldt_ldt_s ldt_info;
6180     struct target_modify_ldt_ldt_s *target_ldt_info;
6181     int seg_32bit, contents, read_exec_only, limit_in_pages;
6182     int seg_not_present, useable, lm;
6183     uint32_t *lp, entry_1, entry_2;
6184 
6185     if (bytecount != sizeof(ldt_info))
6186         return -TARGET_EINVAL;
6187     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6188         return -TARGET_EFAULT;
6189     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6190     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6191     ldt_info.limit = tswap32(target_ldt_info->limit);
6192     ldt_info.flags = tswap32(target_ldt_info->flags);
6193     unlock_user_struct(target_ldt_info, ptr, 0);
6194 
6195     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6196         return -TARGET_EINVAL;
6197     seg_32bit = ldt_info.flags & 1;
6198     contents = (ldt_info.flags >> 1) & 3;
6199     read_exec_only = (ldt_info.flags >> 3) & 1;
6200     limit_in_pages = (ldt_info.flags >> 4) & 1;
6201     seg_not_present = (ldt_info.flags >> 5) & 1;
6202     useable = (ldt_info.flags >> 6) & 1;
6203 #ifdef TARGET_ABI32
6204     lm = 0;
6205 #else
6206     lm = (ldt_info.flags >> 7) & 1;
6207 #endif
6208     if (contents == 3) {
6209         if (oldmode)
6210             return -TARGET_EINVAL;
6211         if (seg_not_present == 0)
6212             return -TARGET_EINVAL;
6213     }
6214     /* allocate the LDT */
6215     if (!ldt_table) {
6216         env->ldt.base = target_mmap(0,
6217                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6218                                     PROT_READ|PROT_WRITE,
6219                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6220         if (env->ldt.base == -1)
6221             return -TARGET_ENOMEM;
6222         memset(g2h(env->ldt.base), 0,
6223                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6224         env->ldt.limit = 0xffff;
6225         ldt_table = g2h(env->ldt.base);
6226     }
6227 
6228     /* NOTE: same code as Linux kernel */
6229     /* Allow LDTs to be cleared by the user. */
6230     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6231         if (oldmode ||
6232             (contents == 0		&&
6233              read_exec_only == 1	&&
6234              seg_32bit == 0		&&
6235              limit_in_pages == 0	&&
6236              seg_not_present == 1	&&
6237              useable == 0 )) {
6238             entry_1 = 0;
6239             entry_2 = 0;
6240             goto install;
6241         }
6242     }
6243 
6244     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6245         (ldt_info.limit & 0x0ffff);
6246     entry_2 = (ldt_info.base_addr & 0xff000000) |
6247         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6248         (ldt_info.limit & 0xf0000) |
6249         ((read_exec_only ^ 1) << 9) |
6250         (contents << 10) |
6251         ((seg_not_present ^ 1) << 15) |
6252         (seg_32bit << 22) |
6253         (limit_in_pages << 23) |
6254         (lm << 21) |
6255         0x7000;
6256     if (!oldmode)
6257         entry_2 |= (useable << 20);
6258 
6259     /* Install the new entry ...  */
6260 install:
6261     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6262     lp[0] = tswap32(entry_1);
6263     lp[1] = tswap32(entry_2);
6264     return 0;
6265 }
6266 
6267 /* specific and weird i386 syscalls */
6268 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6269                               unsigned long bytecount)
6270 {
6271     abi_long ret;
6272 
6273     switch (func) {
6274     case 0:
6275         ret = read_ldt(ptr, bytecount);
6276         break;
6277     case 1:
6278         ret = write_ldt(env, ptr, bytecount, 1);
6279         break;
6280     case 0x11:
6281         ret = write_ldt(env, ptr, bytecount, 0);
6282         break;
6283     default:
6284         ret = -TARGET_ENOSYS;
6285         break;
6286     }
6287     return ret;
6288 }
6289 
6290 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6291 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6292 {
6293     uint64_t *gdt_table = g2h(env->gdt.base);
6294     struct target_modify_ldt_ldt_s ldt_info;
6295     struct target_modify_ldt_ldt_s *target_ldt_info;
6296     int seg_32bit, contents, read_exec_only, limit_in_pages;
6297     int seg_not_present, useable, lm;
6298     uint32_t *lp, entry_1, entry_2;
6299     int i;
6300 
6301     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6302     if (!target_ldt_info)
6303         return -TARGET_EFAULT;
6304     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6305     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6306     ldt_info.limit = tswap32(target_ldt_info->limit);
6307     ldt_info.flags = tswap32(target_ldt_info->flags);
6308     if (ldt_info.entry_number == -1) {
6309         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6310             if (gdt_table[i] == 0) {
6311                 ldt_info.entry_number = i;
6312                 target_ldt_info->entry_number = tswap32(i);
6313                 break;
6314             }
6315         }
6316     }
6317     unlock_user_struct(target_ldt_info, ptr, 1);
6318 
6319     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6320         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6321            return -TARGET_EINVAL;
6322     seg_32bit = ldt_info.flags & 1;
6323     contents = (ldt_info.flags >> 1) & 3;
6324     read_exec_only = (ldt_info.flags >> 3) & 1;
6325     limit_in_pages = (ldt_info.flags >> 4) & 1;
6326     seg_not_present = (ldt_info.flags >> 5) & 1;
6327     useable = (ldt_info.flags >> 6) & 1;
6328 #ifdef TARGET_ABI32
6329     lm = 0;
6330 #else
6331     lm = (ldt_info.flags >> 7) & 1;
6332 #endif
6333 
6334     if (contents == 3) {
6335         if (seg_not_present == 0)
6336             return -TARGET_EINVAL;
6337     }
6338 
6339     /* NOTE: same code as Linux kernel */
6340     /* Allow LDTs to be cleared by the user. */
6341     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6342         if ((contents == 0             &&
6343              read_exec_only == 1       &&
6344              seg_32bit == 0            &&
6345              limit_in_pages == 0       &&
6346              seg_not_present == 1      &&
6347              useable == 0 )) {
6348             entry_1 = 0;
6349             entry_2 = 0;
6350             goto install;
6351         }
6352     }
6353 
6354     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6355         (ldt_info.limit & 0x0ffff);
6356     entry_2 = (ldt_info.base_addr & 0xff000000) |
6357         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6358         (ldt_info.limit & 0xf0000) |
6359         ((read_exec_only ^ 1) << 9) |
6360         (contents << 10) |
6361         ((seg_not_present ^ 1) << 15) |
6362         (seg_32bit << 22) |
6363         (limit_in_pages << 23) |
6364         (useable << 20) |
6365         (lm << 21) |
6366         0x7000;
6367 
6368     /* Install the new entry ...  */
6369 install:
6370     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6371     lp[0] = tswap32(entry_1);
6372     lp[1] = tswap32(entry_2);
6373     return 0;
6374 }
6375 
6376 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6377 {
6378     struct target_modify_ldt_ldt_s *target_ldt_info;
6379     uint64_t *gdt_table = g2h(env->gdt.base);
6380     uint32_t base_addr, limit, flags;
6381     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6382     int seg_not_present, useable, lm;
6383     uint32_t *lp, entry_1, entry_2;
6384 
6385     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6386     if (!target_ldt_info)
6387         return -TARGET_EFAULT;
6388     idx = tswap32(target_ldt_info->entry_number);
6389     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6390         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6391         unlock_user_struct(target_ldt_info, ptr, 1);
6392         return -TARGET_EINVAL;
6393     }
6394     lp = (uint32_t *)(gdt_table + idx);
6395     entry_1 = tswap32(lp[0]);
6396     entry_2 = tswap32(lp[1]);
6397 
6398     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6399     contents = (entry_2 >> 10) & 3;
6400     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6401     seg_32bit = (entry_2 >> 22) & 1;
6402     limit_in_pages = (entry_2 >> 23) & 1;
6403     useable = (entry_2 >> 20) & 1;
6404 #ifdef TARGET_ABI32
6405     lm = 0;
6406 #else
6407     lm = (entry_2 >> 21) & 1;
6408 #endif
6409     flags = (seg_32bit << 0) | (contents << 1) |
6410         (read_exec_only << 3) | (limit_in_pages << 4) |
6411         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6412     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6413     base_addr = (entry_1 >> 16) |
6414         (entry_2 & 0xff000000) |
6415         ((entry_2 & 0xff) << 16);
6416     target_ldt_info->base_addr = tswapal(base_addr);
6417     target_ldt_info->limit = tswap32(limit);
6418     target_ldt_info->flags = tswap32(flags);
6419     unlock_user_struct(target_ldt_info, ptr, 1);
6420     return 0;
6421 }
6422 #endif /* TARGET_I386 && TARGET_ABI32 */
6423 
6424 #ifndef TARGET_ABI32
6425 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6426 {
6427     abi_long ret = 0;
6428     abi_ulong val;
6429     int idx;
6430 
6431     switch(code) {
6432     case TARGET_ARCH_SET_GS:
6433     case TARGET_ARCH_SET_FS:
6434         if (code == TARGET_ARCH_SET_GS)
6435             idx = R_GS;
6436         else
6437             idx = R_FS;
6438         cpu_x86_load_seg(env, idx, 0);
6439         env->segs[idx].base = addr;
6440         break;
6441     case TARGET_ARCH_GET_GS:
6442     case TARGET_ARCH_GET_FS:
6443         if (code == TARGET_ARCH_GET_GS)
6444             idx = R_GS;
6445         else
6446             idx = R_FS;
6447         val = env->segs[idx].base;
6448         if (put_user(val, addr, abi_ulong))
6449             ret = -TARGET_EFAULT;
6450         break;
6451     default:
6452         ret = -TARGET_EINVAL;
6453         break;
6454     }
6455     return ret;
6456 }
6457 #endif
6458 
6459 #endif /* defined(TARGET_I386) */
6460 
6461 #define NEW_STACK_SIZE 0x40000
6462 
6463 
6464 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6465 typedef struct {
6466     CPUArchState *env;
6467     pthread_mutex_t mutex;
6468     pthread_cond_t cond;
6469     pthread_t thread;
6470     uint32_t tid;
6471     abi_ulong child_tidptr;
6472     abi_ulong parent_tidptr;
6473     sigset_t sigmask;
6474 } new_thread_info;
6475 
6476 static void *clone_func(void *arg)
6477 {
6478     new_thread_info *info = arg;
6479     CPUArchState *env;
6480     CPUState *cpu;
6481     TaskState *ts;
6482 
6483     rcu_register_thread();
6484     tcg_register_thread();
6485     env = info->env;
6486     cpu = ENV_GET_CPU(env);
6487     thread_cpu = cpu;
6488     ts = (TaskState *)cpu->opaque;
6489     info->tid = gettid();
6490     task_settid(ts);
6491     if (info->child_tidptr)
6492         put_user_u32(info->tid, info->child_tidptr);
6493     if (info->parent_tidptr)
6494         put_user_u32(info->tid, info->parent_tidptr);
6495     /* Enable signals.  */
6496     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6497     /* Signal to the parent that we're ready.  */
6498     pthread_mutex_lock(&info->mutex);
6499     pthread_cond_broadcast(&info->cond);
6500     pthread_mutex_unlock(&info->mutex);
6501     /* Wait until the parent has finished initializing the tls state.  */
6502     pthread_mutex_lock(&clone_lock);
6503     pthread_mutex_unlock(&clone_lock);
6504     cpu_loop(env);
6505     /* never exits */
6506     return NULL;
6507 }
6508 
6509 /* do_fork() must return host values and target errnos (unlike most
6510    other do_*() functions). */
6511 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6512                    abi_ulong parent_tidptr, target_ulong newtls,
6513                    abi_ulong child_tidptr)
6514 {
6515     CPUState *cpu = ENV_GET_CPU(env);
6516     int ret;
6517     TaskState *ts;
6518     CPUState *new_cpu;
6519     CPUArchState *new_env;
6520     sigset_t sigmask;
6521 
6522     flags &= ~CLONE_IGNORED_FLAGS;
6523 
6524     /* Emulate vfork() with fork() */
6525     if (flags & CLONE_VFORK)
6526         flags &= ~(CLONE_VFORK | CLONE_VM);
6527 
6528     if (flags & CLONE_VM) {
6529         TaskState *parent_ts = (TaskState *)cpu->opaque;
6530         new_thread_info info;
6531         pthread_attr_t attr;
6532 
6533         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6534             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6535             return -TARGET_EINVAL;
6536         }
6537 
6538         ts = g_new0(TaskState, 1);
6539         init_task_state(ts);
6540 
6541         /* Grab a mutex so that thread setup appears atomic.  */
6542         pthread_mutex_lock(&clone_lock);
6543 
6544         /* we create a new CPU instance. */
6545         new_env = cpu_copy(env);
6546         /* Init regs that differ from the parent.  */
6547         cpu_clone_regs(new_env, newsp);
6548         new_cpu = ENV_GET_CPU(new_env);
6549         new_cpu->opaque = ts;
6550         ts->bprm = parent_ts->bprm;
6551         ts->info = parent_ts->info;
6552         ts->signal_mask = parent_ts->signal_mask;
6553 
6554         if (flags & CLONE_CHILD_CLEARTID) {
6555             ts->child_tidptr = child_tidptr;
6556         }
6557 
6558         if (flags & CLONE_SETTLS) {
6559             cpu_set_tls (new_env, newtls);
6560         }
6561 
6562         memset(&info, 0, sizeof(info));
6563         pthread_mutex_init(&info.mutex, NULL);
6564         pthread_mutex_lock(&info.mutex);
6565         pthread_cond_init(&info.cond, NULL);
6566         info.env = new_env;
6567         if (flags & CLONE_CHILD_SETTID) {
6568             info.child_tidptr = child_tidptr;
6569         }
6570         if (flags & CLONE_PARENT_SETTID) {
6571             info.parent_tidptr = parent_tidptr;
6572         }
6573 
6574         ret = pthread_attr_init(&attr);
6575         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6576         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6577         /* It is not safe to deliver signals until the child has finished
6578            initializing, so temporarily block all signals.  */
6579         sigfillset(&sigmask);
6580         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6581 
6582         /* If this is our first additional thread, we need to ensure we
6583          * generate code for parallel execution and flush old translations.
6584          */
6585         if (!parallel_cpus) {
6586             parallel_cpus = true;
6587             tb_flush(cpu);
6588         }
6589 
6590         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6591         /* TODO: Free new CPU state if thread creation failed.  */
6592 
6593         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6594         pthread_attr_destroy(&attr);
6595         if (ret == 0) {
6596             /* Wait for the child to initialize.  */
6597             pthread_cond_wait(&info.cond, &info.mutex);
6598             ret = info.tid;
6599         } else {
6600             ret = -1;
6601         }
6602         pthread_mutex_unlock(&info.mutex);
6603         pthread_cond_destroy(&info.cond);
6604         pthread_mutex_destroy(&info.mutex);
6605         pthread_mutex_unlock(&clone_lock);
6606     } else {
6607         /* Without CLONE_VM, we treat this as a plain fork. */
6608         if (flags & CLONE_INVALID_FORK_FLAGS) {
6609             return -TARGET_EINVAL;
6610         }
6611 
6612         /* We can't support custom termination signals */
6613         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6614             return -TARGET_EINVAL;
6615         }
6616 
6617         if (block_signals()) {
6618             return -TARGET_ERESTARTSYS;
6619         }
6620 
6621         fork_start();
6622         ret = fork();
6623         if (ret == 0) {
6624             /* Child Process.  */
6625             cpu_clone_regs(env, newsp);
6626             fork_end(1);
6627             /* There is a race condition here.  The parent process could
6628                theoretically read the TID in the child process before the child
6629                tid is set.  This would require using either ptrace
6630                (not implemented) or having *_tidptr point at a shared memory
6631                mapping.  We can't repeat the spinlock hack used above because
6632                the child process gets its own copy of the lock.  */
6633             if (flags & CLONE_CHILD_SETTID)
6634                 put_user_u32(gettid(), child_tidptr);
6635             if (flags & CLONE_PARENT_SETTID)
6636                 put_user_u32(gettid(), parent_tidptr);
6637             ts = (TaskState *)cpu->opaque;
6638             if (flags & CLONE_SETTLS)
6639                 cpu_set_tls (env, newtls);
6640             if (flags & CLONE_CHILD_CLEARTID)
6641                 ts->child_tidptr = child_tidptr;
6642         } else {
6643             fork_end(0);
6644         }
6645     }
6646     return ret;
6647 }
6648 
6649 /* warning : doesn't handle linux specific flags... */
6650 static int target_to_host_fcntl_cmd(int cmd)
6651 {
6652     int ret;
6653 
6654     switch(cmd) {
6655     case TARGET_F_DUPFD:
6656     case TARGET_F_GETFD:
6657     case TARGET_F_SETFD:
6658     case TARGET_F_GETFL:
6659     case TARGET_F_SETFL:
6660         ret = cmd;
6661         break;
6662     case TARGET_F_GETLK:
6663         ret = F_GETLK64;
6664         break;
6665     case TARGET_F_SETLK:
6666         ret = F_SETLK64;
6667         break;
6668     case TARGET_F_SETLKW:
6669         ret = F_SETLKW64;
6670         break;
6671     case TARGET_F_GETOWN:
6672         ret = F_GETOWN;
6673         break;
6674     case TARGET_F_SETOWN:
6675         ret = F_SETOWN;
6676         break;
6677     case TARGET_F_GETSIG:
6678         ret = F_GETSIG;
6679         break;
6680     case TARGET_F_SETSIG:
6681         ret = F_SETSIG;
6682         break;
6683 #if TARGET_ABI_BITS == 32
6684     case TARGET_F_GETLK64:
6685         ret = F_GETLK64;
6686         break;
6687     case TARGET_F_SETLK64:
6688         ret = F_SETLK64;
6689         break;
6690     case TARGET_F_SETLKW64:
6691         ret = F_SETLKW64;
6692         break;
6693 #endif
6694     case TARGET_F_SETLEASE:
6695         ret = F_SETLEASE;
6696         break;
6697     case TARGET_F_GETLEASE:
6698         ret = F_GETLEASE;
6699         break;
6700 #ifdef F_DUPFD_CLOEXEC
6701     case TARGET_F_DUPFD_CLOEXEC:
6702         ret = F_DUPFD_CLOEXEC;
6703         break;
6704 #endif
6705     case TARGET_F_NOTIFY:
6706         ret = F_NOTIFY;
6707         break;
6708 #ifdef F_GETOWN_EX
6709     case TARGET_F_GETOWN_EX:
6710         ret = F_GETOWN_EX;
6711         break;
6712 #endif
6713 #ifdef F_SETOWN_EX
6714     case TARGET_F_SETOWN_EX:
6715         ret = F_SETOWN_EX;
6716         break;
6717 #endif
6718 #ifdef F_SETPIPE_SZ
6719     case TARGET_F_SETPIPE_SZ:
6720         ret = F_SETPIPE_SZ;
6721         break;
6722     case TARGET_F_GETPIPE_SZ:
6723         ret = F_GETPIPE_SZ;
6724         break;
6725 #endif
6726     default:
6727         ret = -TARGET_EINVAL;
6728         break;
6729     }
6730 
6731 #if defined(__powerpc64__)
6732     /* On PPC64, the glibc headers define F_GETLK64, F_SETLK64 and F_SETLKW64
6733      * as 12, 13 and 14, which are not supported by the kernel. The glibc
6734      * fcntl wrapper adjusts them to 5, 6 and 7 before making the syscall.
6735      * Since we make the syscall directly, adjust to what the kernel supports.
6736      */
6737     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6738         ret -= F_GETLK64 - 5;
6739     }
6740 #endif
6741 
6742     return ret;
6743 }
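
/*
 * Worked example for the PPC64 adjustment above (numbers taken from the
 * comment inside the function):
 *
 *     ret = F_GETLK64;         12 in the ppc64 glibc headers
 *     ret -= F_GETLK64 - 5;    12 - (12 - 5) == 5, what the kernel expects
 *
 * and likewise 13 -> 6 and 14 -> 7.
 */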
6744 
6745 #define FLOCK_TRANSTBL \
6746     switch (type) { \
6747     TRANSTBL_CONVERT(F_RDLCK); \
6748     TRANSTBL_CONVERT(F_WRLCK); \
6749     TRANSTBL_CONVERT(F_UNLCK); \
6750     TRANSTBL_CONVERT(F_EXLCK); \
6751     TRANSTBL_CONVERT(F_SHLCK); \
6752     }
6753 
6754 static int target_to_host_flock(int type)
6755 {
6756 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6757     FLOCK_TRANSTBL
6758 #undef  TRANSTBL_CONVERT
6759     return -TARGET_EINVAL;
6760 }
6761 
6762 static int host_to_target_flock(int type)
6763 {
6764 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6765     FLOCK_TRANSTBL
6766 #undef  TRANSTBL_CONVERT
6767     /* If we don't know how to convert the value coming
6768      * from the host, copy it to the target field as-is.
6769      */
6770     return type;
6771 }
6772 
6773 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6774                                             abi_ulong target_flock_addr)
6775 {
6776     struct target_flock *target_fl;
6777     int l_type;
6778 
6779     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6780         return -TARGET_EFAULT;
6781     }
6782 
6783     __get_user(l_type, &target_fl->l_type);
6784     l_type = target_to_host_flock(l_type);
6785     if (l_type < 0) {
6786         return l_type;
6787     }
6788     fl->l_type = l_type;
6789     __get_user(fl->l_whence, &target_fl->l_whence);
6790     __get_user(fl->l_start, &target_fl->l_start);
6791     __get_user(fl->l_len, &target_fl->l_len);
6792     __get_user(fl->l_pid, &target_fl->l_pid);
6793     unlock_user_struct(target_fl, target_flock_addr, 0);
6794     return 0;
6795 }
6796 
6797 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6798                                           const struct flock64 *fl)
6799 {
6800     struct target_flock *target_fl;
6801     short l_type;
6802 
6803     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6804         return -TARGET_EFAULT;
6805     }
6806 
6807     l_type = host_to_target_flock(fl->l_type);
6808     __put_user(l_type, &target_fl->l_type);
6809     __put_user(fl->l_whence, &target_fl->l_whence);
6810     __put_user(fl->l_start, &target_fl->l_start);
6811     __put_user(fl->l_len, &target_fl->l_len);
6812     __put_user(fl->l_pid, &target_fl->l_pid);
6813     unlock_user_struct(target_fl, target_flock_addr, 1);
6814     return 0;
6815 }
6816 
6817 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6818 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6819 
6820 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6821 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6822                                                    abi_ulong target_flock_addr)
6823 {
6824     struct target_oabi_flock64 *target_fl;
6825     int l_type;
6826 
6827     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6828         return -TARGET_EFAULT;
6829     }
6830 
6831     __get_user(l_type, &target_fl->l_type);
6832     l_type = target_to_host_flock(l_type);
6833     if (l_type < 0) {
6834         return l_type;
6835     }
6836     fl->l_type = l_type;
6837     __get_user(fl->l_whence, &target_fl->l_whence);
6838     __get_user(fl->l_start, &target_fl->l_start);
6839     __get_user(fl->l_len, &target_fl->l_len);
6840     __get_user(fl->l_pid, &target_fl->l_pid);
6841     unlock_user_struct(target_fl, target_flock_addr, 0);
6842     return 0;
6843 }
6844 
6845 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6846                                                  const struct flock64 *fl)
6847 {
6848     struct target_oabi_flock64 *target_fl;
6849     short l_type;
6850 
6851     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6852         return -TARGET_EFAULT;
6853     }
6854 
6855     l_type = host_to_target_flock(fl->l_type);
6856     __put_user(l_type, &target_fl->l_type);
6857     __put_user(fl->l_whence, &target_fl->l_whence);
6858     __put_user(fl->l_start, &target_fl->l_start);
6859     __put_user(fl->l_len, &target_fl->l_len);
6860     __put_user(fl->l_pid, &target_fl->l_pid);
6861     unlock_user_struct(target_fl, target_flock_addr, 1);
6862     return 0;
6863 }
6864 #endif
6865 
6866 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6867                                               abi_ulong target_flock_addr)
6868 {
6869     struct target_flock64 *target_fl;
6870     int l_type;
6871 
6872     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6873         return -TARGET_EFAULT;
6874     }
6875 
6876     __get_user(l_type, &target_fl->l_type);
6877     l_type = target_to_host_flock(l_type);
6878     if (l_type < 0) {
6879         return l_type;
6880     }
6881     fl->l_type = l_type;
6882     __get_user(fl->l_whence, &target_fl->l_whence);
6883     __get_user(fl->l_start, &target_fl->l_start);
6884     __get_user(fl->l_len, &target_fl->l_len);
6885     __get_user(fl->l_pid, &target_fl->l_pid);
6886     unlock_user_struct(target_fl, target_flock_addr, 0);
6887     return 0;
6888 }
6889 
6890 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6891                                             const struct flock64 *fl)
6892 {
6893     struct target_flock64 *target_fl;
6894     short l_type;
6895 
6896     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6897         return -TARGET_EFAULT;
6898     }
6899 
6900     l_type = host_to_target_flock(fl->l_type);
6901     __put_user(l_type, &target_fl->l_type);
6902     __put_user(fl->l_whence, &target_fl->l_whence);
6903     __put_user(fl->l_start, &target_fl->l_start);
6904     __put_user(fl->l_len, &target_fl->l_len);
6905     __put_user(fl->l_pid, &target_fl->l_pid);
6906     unlock_user_struct(target_fl, target_flock_addr, 1);
6907     return 0;
6908 }
6909 
6910 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6911 {
6912     struct flock64 fl64;
6913 #ifdef F_GETOWN_EX
6914     struct f_owner_ex fox;
6915     struct target_f_owner_ex *target_fox;
6916 #endif
6917     abi_long ret;
6918     int host_cmd = target_to_host_fcntl_cmd(cmd);
6919 
6920     if (host_cmd == -TARGET_EINVAL)
6921         return host_cmd;
6922 
6923     switch(cmd) {
6924     case TARGET_F_GETLK:
6925         ret = copy_from_user_flock(&fl64, arg);
6926         if (ret) {
6927             return ret;
6928         }
6929         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6930         if (ret == 0) {
6931             ret = copy_to_user_flock(arg, &fl64);
6932         }
6933         break;
6934 
6935     case TARGET_F_SETLK:
6936     case TARGET_F_SETLKW:
6937         ret = copy_from_user_flock(&fl64, arg);
6938         if (ret) {
6939             return ret;
6940         }
6941         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6942         break;
6943 
6944     case TARGET_F_GETLK64:
6945         ret = copy_from_user_flock64(&fl64, arg);
6946         if (ret) {
6947             return ret;
6948         }
6949         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6950         if (ret == 0) {
6951             ret = copy_to_user_flock64(arg, &fl64);
6952         }
6953         break;
6954     case TARGET_F_SETLK64:
6955     case TARGET_F_SETLKW64:
6956         ret = copy_from_user_flock64(&fl64, arg);
6957         if (ret) {
6958             return ret;
6959         }
6960         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6961         break;
6962 
6963     case TARGET_F_GETFL:
6964         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6965         if (ret >= 0) {
6966             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6967         }
6968         break;
6969 
6970     case TARGET_F_SETFL:
6971         ret = get_errno(safe_fcntl(fd, host_cmd,
6972                                    target_to_host_bitmask(arg,
6973                                                           fcntl_flags_tbl)));
6974         break;
6975 
6976 #ifdef F_GETOWN_EX
6977     case TARGET_F_GETOWN_EX:
6978         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6979         if (ret >= 0) {
6980             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6981                 return -TARGET_EFAULT;
6982             target_fox->type = tswap32(fox.type);
6983             target_fox->pid = tswap32(fox.pid);
6984             unlock_user_struct(target_fox, arg, 1);
6985         }
6986         break;
6987 #endif
6988 
6989 #ifdef F_SETOWN_EX
6990     case TARGET_F_SETOWN_EX:
6991         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6992             return -TARGET_EFAULT;
6993         fox.type = tswap32(target_fox->type);
6994         fox.pid = tswap32(target_fox->pid);
6995         unlock_user_struct(target_fox, arg, 0);
6996         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6997         break;
6998 #endif
6999 
7000     case TARGET_F_SETOWN:
7001     case TARGET_F_GETOWN:
7002     case TARGET_F_SETSIG:
7003     case TARGET_F_GETSIG:
7004     case TARGET_F_SETLEASE:
7005     case TARGET_F_GETLEASE:
7006     case TARGET_F_SETPIPE_SZ:
7007     case TARGET_F_GETPIPE_SZ:
7008         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7009         break;
7010 
7011     default:
7012         ret = get_errno(safe_fcntl(fd, cmd, arg));
7013         break;
7014     }
7015     return ret;
7016 }
7017 
7018 #ifdef USE_UID16
7019 
7020 static inline int high2lowuid(int uid)
7021 {
7022     if (uid > 65535)
7023         return 65534;
7024     else
7025         return uid;
7026 }
7027 
7028 static inline int high2lowgid(int gid)
7029 {
7030     if (gid > 65535)
7031         return 65534;
7032     else
7033         return gid;
7034 }
7035 
7036 static inline int low2highuid(int uid)
7037 {
7038     if ((int16_t)uid == -1)
7039         return -1;
7040     else
7041         return uid;
7042 }
7043 
7044 static inline int low2highgid(int gid)
7045 {
7046     if ((int16_t)gid == -1)
7047         return -1;
7048     else
7049         return gid;
7050 }
7051 static inline int tswapid(int id)
7052 {
7053     return tswap16(id);
7054 }
7055 
7056 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7057 
7058 #else /* !USE_UID16 */
7059 static inline int high2lowuid(int uid)
7060 {
7061     return uid;
7062 }
7063 static inline int high2lowgid(int gid)
7064 {
7065     return gid;
7066 }
7067 static inline int low2highuid(int uid)
7068 {
7069     return uid;
7070 }
7071 static inline int low2highgid(int gid)
7072 {
7073     return gid;
7074 }
7075 static inline int tswapid(int id)
7076 {
7077     return tswap32(id);
7078 }
7079 
7080 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7081 
7082 #endif /* USE_UID16 */
7083 
7084 /* We must do direct syscalls for setting UID/GID, because we want to
7085  * implement the Linux system call semantics of "change only for this thread",
7086  * not the libc/POSIX semantics of "change for all threads in process".
7087  * (See http://ewontfix.com/17/ for more details.)
7088  * We use the 32-bit version of the syscalls if present; if it is not
7089  * then either the host architecture supports 32-bit UIDs natively with
7090  * the standard syscall, or the 16-bit UID is the best we can do.
7091  */
7092 #ifdef __NR_setuid32
7093 #define __NR_sys_setuid __NR_setuid32
7094 #else
7095 #define __NR_sys_setuid __NR_setuid
7096 #endif
7097 #ifdef __NR_setgid32
7098 #define __NR_sys_setgid __NR_setgid32
7099 #else
7100 #define __NR_sys_setgid __NR_setgid
7101 #endif
7102 #ifdef __NR_setresuid32
7103 #define __NR_sys_setresuid __NR_setresuid32
7104 #else
7105 #define __NR_sys_setresuid __NR_setresuid
7106 #endif
7107 #ifdef __NR_setresgid32
7108 #define __NR_sys_setresgid __NR_setresgid32
7109 #else
7110 #define __NR_sys_setresgid __NR_setresgid
7111 #endif
7112 
7113 _syscall1(int, sys_setuid, uid_t, uid)
7114 _syscall1(int, sys_setgid, gid_t, gid)
7115 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7116 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
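
/*
 * These wrappers are used by the uid/gid syscall handling further down,
 * roughly like this (sketch of the TARGET_NR_setuid case; low2highuid()
 * first widens a possible 16-bit guest value):
 *
 *     case TARGET_NR_setuid:
 *         ret = get_errno(sys_setuid(low2highuid(arg1)));
 *         break;
 *
 * Calling glibc's setuid() here instead would change the credentials of
 * every thread in the emulator, not just the calling one.
 */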
7117 
7118 void syscall_init(void)
7119 {
7120     IOCTLEntry *ie;
7121     const argtype *arg_type;
7122     int size;
7123     int i;
7124 
7125     thunk_init(STRUCT_MAX);
7126 
7127 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7128 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7129 #include "syscall_types.h"
7130 #undef STRUCT
7131 #undef STRUCT_SPECIAL
7132 
7133     /* Build the target_to_host_errno_table[] from
7134      * host_to_target_errno_table[]. */
7135     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7136         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7137     }
7138 
7139     /* We patch the ioctl size if necessary. We rely on the fact that
7140        no ioctl has all bits set to '1' in the size field. */
7141     ie = ioctl_entries;
7142     while (ie->target_cmd != 0) {
7143         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7144             TARGET_IOC_SIZEMASK) {
7145             arg_type = ie->arg_type;
7146             if (arg_type[0] != TYPE_PTR) {
7147                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7148                         ie->target_cmd);
7149                 exit(1);
7150             }
7151             arg_type++;
7152             size = thunk_type_size(arg_type, 0);
7153             ie->target_cmd = (ie->target_cmd &
7154                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7155                 (size << TARGET_IOC_SIZESHIFT);
7156         }
7157 
7158         /* automatic consistency check if same arch */
7159 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7160     (defined(__x86_64__) && defined(TARGET_X86_64))
7161         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7162             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7163                     ie->name, ie->target_cmd, ie->host_cmd);
7164         }
7165 #endif
7166         ie++;
7167     }
7168 }
7169 
7170 #if TARGET_ABI_BITS == 32
7171 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7172 {
7173 #ifdef TARGET_WORDS_BIGENDIAN
7174     return ((uint64_t)word0 << 32) | word1;
7175 #else
7176     return ((uint64_t)word1 << 32) | word0;
7177 #endif
7178 }
7179 #else /* TARGET_ABI_BITS == 32 */
7180 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7181 {
7182     return word0;
7183 }
7184 #endif /* TARGET_ABI_BITS != 32 */
7185 
7186 #ifdef TARGET_NR_truncate64
7187 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7188                                          abi_long arg2,
7189                                          abi_long arg3,
7190                                          abi_long arg4)
7191 {
7192     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7193         arg2 = arg3;
7194         arg3 = arg4;
7195     }
7196     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7197 }
7198 #endif
7199 
7200 #ifdef TARGET_NR_ftruncate64
7201 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7202                                           abi_long arg2,
7203                                           abi_long arg3,
7204                                           abi_long arg4)
7205 {
7206     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7207         arg2 = arg3;
7208         arg3 = arg4;
7209     }
7210     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7211 }
7212 #endif
7213 
7214 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7215                                                abi_ulong target_addr)
7216 {
7217     struct target_timespec *target_ts;
7218 
7219     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7220         return -TARGET_EFAULT;
7221     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7222     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7223     unlock_user_struct(target_ts, target_addr, 0);
7224     return 0;
7225 }
7226 
7227 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7228                                                struct timespec *host_ts)
7229 {
7230     struct target_timespec *target_ts;
7231 
7232     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7233         return -TARGET_EFAULT;
7234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7236     unlock_user_struct(target_ts, target_addr, 1);
7237     return 0;
7238 }
7239 
7240 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7241                                                  abi_ulong target_addr)
7242 {
7243     struct target_itimerspec *target_itspec;
7244 
7245     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7246         return -TARGET_EFAULT;
7247     }
7248 
7249     host_itspec->it_interval.tv_sec =
7250                             tswapal(target_itspec->it_interval.tv_sec);
7251     host_itspec->it_interval.tv_nsec =
7252                             tswapal(target_itspec->it_interval.tv_nsec);
7253     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7254     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7255 
7256     unlock_user_struct(target_itspec, target_addr, 1);
7257     return 0;
7258 }
7259 
7260 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7261                                                struct itimerspec *host_its)
7262 {
7263     struct target_itimerspec *target_itspec;
7264 
7265     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7266         return -TARGET_EFAULT;
7267     }
7268 
7269     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7270     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7271 
7272     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7273     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7274 
7275     unlock_user_struct(target_itspec, target_addr, 0);
7276     return 0;
7277 }
7278 
7279 static inline abi_long target_to_host_timex(struct timex *host_tx,
7280                                             abi_long target_addr)
7281 {
7282     struct target_timex *target_tx;
7283 
7284     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7285         return -TARGET_EFAULT;
7286     }
7287 
7288     __get_user(host_tx->modes, &target_tx->modes);
7289     __get_user(host_tx->offset, &target_tx->offset);
7290     __get_user(host_tx->freq, &target_tx->freq);
7291     __get_user(host_tx->maxerror, &target_tx->maxerror);
7292     __get_user(host_tx->esterror, &target_tx->esterror);
7293     __get_user(host_tx->status, &target_tx->status);
7294     __get_user(host_tx->constant, &target_tx->constant);
7295     __get_user(host_tx->precision, &target_tx->precision);
7296     __get_user(host_tx->tolerance, &target_tx->tolerance);
7297     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7298     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7299     __get_user(host_tx->tick, &target_tx->tick);
7300     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7301     __get_user(host_tx->jitter, &target_tx->jitter);
7302     __get_user(host_tx->shift, &target_tx->shift);
7303     __get_user(host_tx->stabil, &target_tx->stabil);
7304     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7305     __get_user(host_tx->calcnt, &target_tx->calcnt);
7306     __get_user(host_tx->errcnt, &target_tx->errcnt);
7307     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7308     __get_user(host_tx->tai, &target_tx->tai);
7309 
7310     unlock_user_struct(target_tx, target_addr, 0);
7311     return 0;
7312 }
7313 
7314 static inline abi_long host_to_target_timex(abi_long target_addr,
7315                                             struct timex *host_tx)
7316 {
7317     struct target_timex *target_tx;
7318 
7319     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7320         return -TARGET_EFAULT;
7321     }
7322 
7323     __put_user(host_tx->modes, &target_tx->modes);
7324     __put_user(host_tx->offset, &target_tx->offset);
7325     __put_user(host_tx->freq, &target_tx->freq);
7326     __put_user(host_tx->maxerror, &target_tx->maxerror);
7327     __put_user(host_tx->esterror, &target_tx->esterror);
7328     __put_user(host_tx->status, &target_tx->status);
7329     __put_user(host_tx->constant, &target_tx->constant);
7330     __put_user(host_tx->precision, &target_tx->precision);
7331     __put_user(host_tx->tolerance, &target_tx->tolerance);
7332     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7333     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7334     __put_user(host_tx->tick, &target_tx->tick);
7335     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7336     __put_user(host_tx->jitter, &target_tx->jitter);
7337     __put_user(host_tx->shift, &target_tx->shift);
7338     __put_user(host_tx->stabil, &target_tx->stabil);
7339     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7340     __put_user(host_tx->calcnt, &target_tx->calcnt);
7341     __put_user(host_tx->errcnt, &target_tx->errcnt);
7342     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7343     __put_user(host_tx->tai, &target_tx->tai);
7344 
7345     unlock_user_struct(target_tx, target_addr, 1);
7346     return 0;
7347 }
7348 
7349 
7350 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7351                                                abi_ulong target_addr)
7352 {
7353     struct target_sigevent *target_sevp;
7354 
7355     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7356         return -TARGET_EFAULT;
7357     }
7358 
7359     /* This union is awkward on 64 bit systems because it has a 32 bit
7360      * integer and a pointer in it; we follow the conversion approach
7361      * used for handling sigval types in signal.c so the guest should get
7362      * the correct value back even if we did a 64 bit byteswap and it's
7363      * using the 32 bit integer.
7364      */
7365     host_sevp->sigev_value.sival_ptr =
7366         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7367     host_sevp->sigev_signo =
7368         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7369     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7370     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7371 
7372     unlock_user_struct(target_sevp, target_addr, 1);
7373     return 0;
7374 }
7375 
7376 #if defined(TARGET_NR_mlockall)
7377 static inline int target_to_host_mlockall_arg(int arg)
7378 {
7379     int result = 0;
7380 
7381     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7382         result |= MCL_CURRENT;
7383     }
7384     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7385         result |= MCL_FUTURE;
7386     }
7387     return result;
7388 }
7389 #endif
7390 
7391 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7392      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7393      defined(TARGET_NR_newfstatat))
7394 static inline abi_long host_to_target_stat64(void *cpu_env,
7395                                              abi_ulong target_addr,
7396                                              struct stat *host_st)
7397 {
7398 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7399     if (((CPUARMState *)cpu_env)->eabi) {
7400         struct target_eabi_stat64 *target_st;
7401 
7402         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7403             return -TARGET_EFAULT;
7404         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7405         __put_user(host_st->st_dev, &target_st->st_dev);
7406         __put_user(host_st->st_ino, &target_st->st_ino);
7407 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7408         __put_user(host_st->st_ino, &target_st->__st_ino);
7409 #endif
7410         __put_user(host_st->st_mode, &target_st->st_mode);
7411         __put_user(host_st->st_nlink, &target_st->st_nlink);
7412         __put_user(host_st->st_uid, &target_st->st_uid);
7413         __put_user(host_st->st_gid, &target_st->st_gid);
7414         __put_user(host_st->st_rdev, &target_st->st_rdev);
7415         __put_user(host_st->st_size, &target_st->st_size);
7416         __put_user(host_st->st_blksize, &target_st->st_blksize);
7417         __put_user(host_st->st_blocks, &target_st->st_blocks);
7418         __put_user(host_st->st_atime, &target_st->target_st_atime);
7419         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7420         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7421         unlock_user_struct(target_st, target_addr, 1);
7422     } else
7423 #endif
7424     {
7425 #if defined(TARGET_HAS_STRUCT_STAT64)
7426         struct target_stat64 *target_st;
7427 #else
7428         struct target_stat *target_st;
7429 #endif
7430 
7431         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7432             return -TARGET_EFAULT;
7433         memset(target_st, 0, sizeof(*target_st));
7434         __put_user(host_st->st_dev, &target_st->st_dev);
7435         __put_user(host_st->st_ino, &target_st->st_ino);
7436 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7437         __put_user(host_st->st_ino, &target_st->__st_ino);
7438 #endif
7439         __put_user(host_st->st_mode, &target_st->st_mode);
7440         __put_user(host_st->st_nlink, &target_st->st_nlink);
7441         __put_user(host_st->st_uid, &target_st->st_uid);
7442         __put_user(host_st->st_gid, &target_st->st_gid);
7443         __put_user(host_st->st_rdev, &target_st->st_rdev);
7444         /* XXX: better use of kernel struct */
7445         __put_user(host_st->st_size, &target_st->st_size);
7446         __put_user(host_st->st_blksize, &target_st->st_blksize);
7447         __put_user(host_st->st_blocks, &target_st->st_blocks);
7448         __put_user(host_st->st_atime, &target_st->target_st_atime);
7449         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7450         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7451         unlock_user_struct(target_st, target_addr, 1);
7452     }
7453 
7454     return 0;
7455 }
7456 #endif
7457 
7458 /* ??? Using host futex calls even when target atomic operations
7459    are not really atomic probably breaks things.  However, implementing
7460    futexes locally would make futexes shared between multiple processes
7461    tricky.  In any case they're probably useless, because guest atomic
7462    operations won't work either.  */
7463 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7464                     target_ulong uaddr2, int val3)
7465 {
7466     struct timespec ts, *pts;
7467     int base_op;
7468 
7469     /* ??? We assume FUTEX_* constants are the same on both host
7470        and target.  */
7471 #ifdef FUTEX_CMD_MASK
7472     base_op = op & FUTEX_CMD_MASK;
7473 #else
7474     base_op = op;
7475 #endif
7476     switch (base_op) {
7477     case FUTEX_WAIT:
7478     case FUTEX_WAIT_BITSET:
7479         if (timeout) {
7480             pts = &ts;
7481             target_to_host_timespec(pts, timeout);
7482         } else {
7483             pts = NULL;
7484         }
7485         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7486                          pts, NULL, val3));
7487     case FUTEX_WAKE:
7488         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7489     case FUTEX_FD:
7490         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7491     case FUTEX_REQUEUE:
7492     case FUTEX_CMP_REQUEUE:
7493     case FUTEX_WAKE_OP:
7494         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7495            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7496            But the prototype takes a `struct timespec *'; insert casts
7497            to satisfy the compiler.  We do not need to tswap TIMEOUT
7498            since it's not compared to guest memory.  */
7499         pts = (struct timespec *)(uintptr_t) timeout;
7500         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7501                                     g2h(uaddr2),
7502                                     (base_op == FUTEX_CMP_REQUEUE
7503                                      ? tswap32(val3)
7504                                      : val3)));
7505     default:
7506         return -TARGET_ENOSYS;
7507     }
7508 }
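
/*
 * Minimal sketch of the most common case above, a guest FUTEX_WAIT with a
 * timeout (guest_uaddr and guest_timeout_addr are hypothetical guest
 * addresses):
 *
 *     struct timespec ts;
 *     target_to_host_timespec(&ts, guest_timeout_addr);
 *     safe_futex(g2h(guest_uaddr), op, tswap32(val), &ts, NULL, val3);
 *
 * The expected value is byte-swapped because the kernel compares it
 * against the guest-endian word that lives at g2h(uaddr).
 */
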
7509 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7510 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7511                                      abi_long handle, abi_long mount_id,
7512                                      abi_long flags)
7513 {
7514     struct file_handle *target_fh;
7515     struct file_handle *fh;
7516     int mid = 0;
7517     abi_long ret;
7518     char *name;
7519     unsigned int size, total_size;
7520 
7521     if (get_user_s32(size, handle)) {
7522         return -TARGET_EFAULT;
7523     }
7524 
7525     name = lock_user_string(pathname);
7526     if (!name) {
7527         return -TARGET_EFAULT;
7528     }
7529 
7530     total_size = sizeof(struct file_handle) + size;
7531     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7532     if (!target_fh) {
7533         unlock_user(name, pathname, 0);
7534         return -TARGET_EFAULT;
7535     }
7536 
7537     fh = g_malloc0(total_size);
7538     fh->handle_bytes = size;
7539 
7540     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7541     unlock_user(name, pathname, 0);
7542 
7543     /* man name_to_handle_at(2):
7544      * Other than the use of the handle_bytes field, the caller should treat
7545      * the file_handle structure as an opaque data type
7546      */
7547 
7548     memcpy(target_fh, fh, total_size);
7549     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7550     target_fh->handle_type = tswap32(fh->handle_type);
7551     g_free(fh);
7552     unlock_user(target_fh, handle, total_size);
7553 
7554     if (put_user_s32(mid, mount_id)) {
7555         return -TARGET_EFAULT;
7556     }
7557 
7558     return ret;
7559 
7560 }
7561 #endif
7562 
7563 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7564 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7565                                      abi_long flags)
7566 {
7567     struct file_handle *target_fh;
7568     struct file_handle *fh;
7569     unsigned int size, total_size;
7570     abi_long ret;
7571 
7572     if (get_user_s32(size, handle)) {
7573         return -TARGET_EFAULT;
7574     }
7575 
7576     total_size = sizeof(struct file_handle) + size;
7577     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7578     if (!target_fh) {
7579         return -TARGET_EFAULT;
7580     }
7581 
7582     fh = g_memdup(target_fh, total_size);
7583     fh->handle_bytes = size;
7584     fh->handle_type = tswap32(target_fh->handle_type);
7585 
7586     ret = get_errno(open_by_handle_at(mount_fd, fh,
7587                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7588 
7589     g_free(fh);
7590 
7591     unlock_user(target_fh, handle, total_size);
7592 
7593     return ret;
7594 }
7595 #endif
7596 
7597 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7598 
7599 /* signalfd siginfo conversion */
7600 
7601 static void
7602 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7603                                 const struct signalfd_siginfo *info)
7604 {
7605     int sig = host_to_target_signal(info->ssi_signo);
7606 
7607     /* linux/signalfd.h defines an ssi_addr_lsb field that is not
7608      * present in sys/signalfd.h but is used by some kernels
7609      */
7610 
7611 #ifdef BUS_MCEERR_AO
7612     if (info->ssi_signo == SIGBUS &&
7613         (info->ssi_code == BUS_MCEERR_AR ||
7614          info->ssi_code == BUS_MCEERR_AO)) {
7615         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7616         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7617         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7618     }
7619 #endif
7620 
7621     tinfo->ssi_signo = tswap32(sig);
7622     tinfo->ssi_errno = tswap32(info->ssi_errno);
7623     tinfo->ssi_code = tswap32(info->ssi_code);
7624     tinfo->ssi_pid = tswap32(info->ssi_pid);
7625     tinfo->ssi_uid = tswap32(info->ssi_uid);
7626     tinfo->ssi_fd = tswap32(info->ssi_fd);
7627     tinfo->ssi_tid = tswap32(info->ssi_tid);
7628     tinfo->ssi_band = tswap32(info->ssi_band);
7629     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7630     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7631     tinfo->ssi_status = tswap32(info->ssi_status);
7632     tinfo->ssi_int = tswap32(info->ssi_int);
7633     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7634     tinfo->ssi_utime = tswap64(info->ssi_utime);
7635     tinfo->ssi_stime = tswap64(info->ssi_stime);
7636     tinfo->ssi_addr = tswap64(info->ssi_addr);
7637 }
7638 
7639 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7640 {
7641     int i;
7642 
7643     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7644         host_to_target_signalfd_siginfo(buf + i, buf + i);
7645     }
7646 
7647     return len;
7648 }
7649 
7650 static TargetFdTrans target_signalfd_trans = {
7651     .host_to_target_data = host_to_target_data_signalfd,
7652 };
7653 
7654 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7655 {
7656     int host_flags;
7657     target_sigset_t *target_mask;
7658     sigset_t host_mask;
7659     abi_long ret;
7660 
7661     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7662         return -TARGET_EINVAL;
7663     }
7664     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7665         return -TARGET_EFAULT;
7666     }
7667 
7668     target_to_host_sigset(&host_mask, target_mask);
7669 
7670     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7671 
7672     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7673     if (ret >= 0) {
7674         fd_trans_register(ret, &target_signalfd_trans);
7675     }
7676 
7677     unlock_user_struct(target_mask, mask, 0);
7678 
7679     return ret;
7680 }
7681 #endif
7682 
7683 /* Map host to target signal numbers for the wait family of syscalls.
7684    Assume all other status bits are the same.  */
7685 int host_to_target_waitstatus(int status)
7686 {
7687     if (WIFSIGNALED(status)) {
7688         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7689     }
7690     if (WIFSTOPPED(status)) {
7691         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7692                | (status & 0xff);
7693     }
7694     return status;
7695 }
7696 
7697 static int open_self_cmdline(void *cpu_env, int fd)
7698 {
7699     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7700     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7701     int i;
7702 
7703     for (i = 0; i < bprm->argc; i++) {
7704         size_t len = strlen(bprm->argv[i]) + 1;
7705 
7706         if (write(fd, bprm->argv[i], len) != len) {
7707             return -1;
7708         }
7709     }
7710 
7711     return 0;
7712 }
7713 
7714 static int open_self_maps(void *cpu_env, int fd)
7715 {
7716     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7717     TaskState *ts = cpu->opaque;
7718     FILE *fp;
7719     char *line = NULL;
7720     size_t len = 0;
7721     ssize_t read;
7722 
7723     fp = fopen("/proc/self/maps", "r");
7724     if (fp == NULL) {
7725         return -1;
7726     }
7727 
7728     while ((read = getline(&line, &len, fp)) != -1) {
7729         int fields, dev_maj, dev_min, inode;
7730         uint64_t min, max, offset;
7731         char flag_r, flag_w, flag_x, flag_p;
7732         char path[512] = "";
7733         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7734                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7735                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7736 
7737         if ((fields < 10) || (fields > 11)) {
7738             continue;
7739         }
7740         if (h2g_valid(min)) {
7741             int flags = page_get_flags(h2g(min));
7742             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7743             if (page_check_range(h2g(min), max - min, flags) == -1) {
7744                 continue;
7745             }
7746             if (h2g(min) == ts->info->stack_limit) {
7747                 pstrcpy(path, sizeof(path), "      [stack]");
7748             }
7749             dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7750                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7751                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7752                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7753                     path[0] ? "         " : "", path);
7754         }
7755     }
7756 
7757     free(line);
7758     fclose(fp);
7759 
7760     return 0;
7761 }
7762 
7763 static int open_self_stat(void *cpu_env, int fd)
7764 {
7765     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7766     TaskState *ts = cpu->opaque;
7767     abi_ulong start_stack = ts->info->start_stack;
7768     int i;
7769 
7770     for (i = 0; i < 44; i++) {
7771       char buf[128];
7772       int len;
7773       uint64_t val = 0;
7774 
7775       if (i == 0) {
7776         /* pid */
7777         val = getpid();
7778         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7779       } else if (i == 1) {
7780         /* app name */
7781         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7782       } else if (i == 27) {
7783         /* stack bottom */
7784         val = start_stack;
7785         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7786       } else {
7787         /* every other field is reported as zero */
7788         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7789       }
7790 
7791       len = strlen(buf);
7792       if (write(fd, buf, len) != len) {
7793           return -1;
7794       }
7795     }
7796 
7797     return 0;
7798 }
7799 
7800 static int open_self_auxv(void *cpu_env, int fd)
7801 {
7802     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7803     TaskState *ts = cpu->opaque;
7804     abi_ulong auxv = ts->info->saved_auxv;
7805     abi_ulong len = ts->info->auxv_len;
7806     char *ptr;
7807 
7808     /*
7809      * The auxiliary vector is stored on the target process's stack.
7810      * Read the whole auxv vector and copy it to the file.
7811      */
7812     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7813     if (ptr != NULL) {
7814         while (len > 0) {
7815             ssize_t r;
7816             r = write(fd, ptr, len);
7817             if (r <= 0) {
7818                 break;
7819             }
7820             len -= r;
7821             ptr += r;
7822         }
7823         lseek(fd, 0, SEEK_SET);
7824         unlock_user(ptr, auxv, len);
7825     }
7826 
7827     return 0;
7828 }
7829 
7830 static int is_proc_myself(const char *filename, const char *entry)
7831 {
7832     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7833         filename += strlen("/proc/");
7834         if (!strncmp(filename, "self/", strlen("self/"))) {
7835             filename += strlen("self/");
7836         } else if (*filename >= '1' && *filename <= '9') {
7837             char myself[80];
7838             snprintf(myself, sizeof(myself), "%d/", getpid());
7839             if (!strncmp(filename, myself, strlen(myself))) {
7840                 filename += strlen(myself);
7841             } else {
7842                 return 0;
7843             }
7844         } else {
7845             return 0;
7846         }
7847         if (!strcmp(filename, entry)) {
7848             return 1;
7849         }
7850     }
7851     return 0;
7852 }
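/* Editor's note (illustrative, assuming getpid() == 1234):
 *
 *     is_proc_myself("/proc/self/maps", "maps")   -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")   -> 1
 *     is_proc_myself("/proc/4321/maps", "maps")   -> 0   (another process)
 *     is_proc_myself("/proc/self/mapsx", "maps")  -> 0   (exact match only)
 */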
7853 
7854 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7855 static int is_proc(const char *filename, const char *entry)
7856 {
7857     return strcmp(filename, entry) == 0;
7858 }
7859 
7860 static int open_net_route(void *cpu_env, int fd)
7861 {
7862     FILE *fp;
7863     char *line = NULL;
7864     size_t len = 0;
7865     ssize_t read;
7866 
7867     fp = fopen("/proc/net/route", "r");
7868     if (fp == NULL) {
7869         return -1;
7870     }
7871 
7872     /* read header */
7873 
7874     read = getline(&line, &len, fp);
7875     dprintf(fd, "%s", line);
7876 
7877     /* read routes */
7878 
7879     while ((read = getline(&line, &len, fp)) != -1) {
7880         char iface[16];
7881         uint32_t dest, gw, mask;
7882         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7883         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7884                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7885                      &mask, &mtu, &window, &irtt);
7886         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7887                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7888                 metric, tswap32(mask), mtu, window, irtt);
7889     }
7890 
7891     free(line);
7892     fclose(fp);
7893 
7894     return 0;
7895 }
7896 #endif
7897 
7898 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7899 {
7900     struct fake_open {
7901         const char *filename;
7902         int (*fill)(void *cpu_env, int fd);
7903         int (*cmp)(const char *s1, const char *s2);
7904     };
7905     const struct fake_open *fake_open;
7906     static const struct fake_open fakes[] = {
7907         { "maps", open_self_maps, is_proc_myself },
7908         { "stat", open_self_stat, is_proc_myself },
7909         { "auxv", open_self_auxv, is_proc_myself },
7910         { "cmdline", open_self_cmdline, is_proc_myself },
7911 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7912         { "/proc/net/route", open_net_route, is_proc },
7913 #endif
7914         { NULL, NULL, NULL }
7915     };
7916 
7917     if (is_proc_myself(pathname, "exe")) {
7918         int execfd = qemu_getauxval(AT_EXECFD);
7919         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7920     }
7921 
7922     for (fake_open = fakes; fake_open->filename; fake_open++) {
7923         if (fake_open->cmp(pathname, fake_open->filename)) {
7924             break;
7925         }
7926     }
7927 
7928     if (fake_open->filename) {
7929         const char *tmpdir;
7930         char filename[PATH_MAX];
7931         int fd, r;
7932 
7933         /* create a temporary file to hold the synthesized contents */
7934         tmpdir = getenv("TMPDIR");
7935         if (!tmpdir)
7936             tmpdir = "/tmp";
7937         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7938         fd = mkstemp(filename);
7939         if (fd < 0) {
7940             return fd;
7941         }
7942         unlink(filename);
7943 
7944         if ((r = fake_open->fill(cpu_env, fd))) {
7945             int e = errno;
7946             close(fd);
7947             errno = e;
7948             return r;
7949         }
7950         lseek(fd, 0, SEEK_SET);
7951 
7952         return fd;
7953     }
7954 
7955     return safe_openat(dirfd, path(pathname), flags, mode);
7956 }
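/* Editor's note (illustrative): a guest open() of one of the faked paths is
 * answered from an unlinked temporary file filled by the matching fill()
 * callback and then rewound, e.g.
 *
 *     fd = do_openat(cpu_env, AT_FDCWD, "/proc/self/stat", O_RDONLY, 0);
 *     // fd now refers to a temp file under $TMPDIR containing the output
 *     // of open_self_stat(), not to the host's own /proc entry.
 *
 * Every other path falls through to safe_openat() on the remapped host path.
 */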
7957 
7958 #define TIMER_MAGIC 0x0caf0000
7959 #define TIMER_MAGIC_MASK 0xffff0000
7960 
7961 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7962 static target_timer_t get_timer_id(abi_long arg)
7963 {
7964     target_timer_t timerid = arg;
7965 
7966     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7967         return -TARGET_EINVAL;
7968     }
7969 
7970     timerid &= 0xffff;
7971 
7972     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7973         return -TARGET_EINVAL;
7974     }
7975 
7976     return timerid;
7977 }
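/* Editor's sketch (illustrative): guest-visible timer IDs are the 16-bit slot
 * index tagged with TIMER_MAGIC in the upper bits, so for example
 *
 *     get_timer_id(TIMER_MAGIC | 3)  -> 3      (if slot 3 is within
 *                                               g_posix_timers)
 *     get_timer_id(3)                -> -TARGET_EINVAL   (magic missing)
 */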
7978 
7979 static abi_long swap_data_eventfd(void *buf, size_t len)
7980 {
7981     uint64_t *counter = buf;
7982     int i;
7983 
7984     if (len < sizeof(uint64_t)) {
7985         return -EINVAL;
7986     }
7987 
7988     for (i = 0; i < len; i += sizeof(uint64_t)) {
7989         *counter = tswap64(*counter);
7990         counter++;
7991     }
7992 
7993     return len;
7994 }
7995 
7996 static TargetFdTrans target_eventfd_trans = {
7997     .host_to_target_data = swap_data_eventfd,
7998     .target_to_host_data = swap_data_eventfd,
7999 };
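/* Editor's note (illustrative): an eventfd carries a single 64-bit counter,
 * so one byte-swap helper serves both directions; tswap64() reorders the
 * eight bytes only when host and target endianness differ, e.g.
 *
 *     uint64_t v = 1;
 *     swap_data_eventfd(&v, sizeof(v));   // cross-endian: bytes reversed
 */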
8000 
8001 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
8002     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
8003      defined(__NR_inotify_init1))
8004 static abi_long host_to_target_data_inotify(void *buf, size_t len)
8005 {
8006     struct inotify_event *ev;
8007     int i;
8008     uint32_t name_len;
8009 
8010     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
8011         ev = (struct inotify_event *)((char *)buf + i);
8012         name_len = ev->len;
8013 
8014         ev->wd = tswap32(ev->wd);
8015         ev->mask = tswap32(ev->mask);
8016         ev->cookie = tswap32(ev->cookie);
8017         ev->len = tswap32(name_len);
8018     }
8019 
8020     return len;
8021 }
8022 
8023 static TargetFdTrans target_inotify_trans = {
8024     .host_to_target_data = host_to_target_data_inotify,
8025 };
8026 #endif
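/* Editor's note (illustrative): inotify events are variable-length records
 * (a struct inotify_event followed by ev->len name bytes, padding included),
 * which is why the loop above captures ev->len before byte-swapping it: the
 * still-host-order length is needed to step to the next record.
 */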
8027 
8028 static int target_to_host_cpu_mask(unsigned long *host_mask,
8029                                    size_t host_size,
8030                                    abi_ulong target_addr,
8031                                    size_t target_size)
8032 {
8033     unsigned target_bits = sizeof(abi_ulong) * 8;
8034     unsigned host_bits = sizeof(*host_mask) * 8;
8035     abi_ulong *target_mask;
8036     unsigned i, j;
8037 
8038     assert(host_size >= target_size);
8039 
8040     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8041     if (!target_mask) {
8042         return -TARGET_EFAULT;
8043     }
8044     memset(host_mask, 0, host_size);
8045 
8046     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8047         unsigned bit = i * target_bits;
8048         abi_ulong val;
8049 
8050         __get_user(val, &target_mask[i]);
8051         for (j = 0; j < target_bits; j++, bit++) {
8052             if (val & (1UL << j)) {
8053                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8054             }
8055         }
8056     }
8057 
8058     unlock_user(target_mask, target_addr, 0);
8059     return 0;
8060 }
8061 
8062 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8063                                    size_t host_size,
8064                                    abi_ulong target_addr,
8065                                    size_t target_size)
8066 {
8067     unsigned target_bits = sizeof(abi_ulong) * 8;
8068     unsigned host_bits = sizeof(*host_mask) * 8;
8069     abi_ulong *target_mask;
8070     unsigned i, j;
8071 
8072     assert(host_size >= target_size);
8073 
8074     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8075     if (!target_mask) {
8076         return -TARGET_EFAULT;
8077     }
8078 
8079     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8080         unsigned bit = i * target_bits;
8081         abi_ulong val = 0;
8082 
8083         for (j = 0; j < target_bits; j++, bit++) {
8084             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8085                 val |= 1UL << j;
8086             }
8087         }
8088         __put_user(val, &target_mask[i]);
8089     }
8090 
8091     unlock_user(target_mask, target_addr, target_size);
8092     return 0;
8093 }
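/* Worked example (editor's sketch): with 32-bit abi_ulong target words and
 * 64-bit host words, a target mask of { 0x00000005, 0x00000001 } (CPUs 0, 2
 * and 32) becomes host_mask[0] == 0x0000000100000005, and
 * host_to_target_cpu_mask() reproduces the original two target words.
 */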
8094 
8095 /* This is an internal helper for do_syscall so that it is easier
8096  * to have a single return point, allowing actions such as logging
8097  * of syscall results to be performed.
8098  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8099  */
8100 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8101                             abi_long arg2, abi_long arg3, abi_long arg4,
8102                             abi_long arg5, abi_long arg6, abi_long arg7,
8103                             abi_long arg8)
8104 {
8105     CPUState *cpu = ENV_GET_CPU(cpu_env);
8106     abi_long ret;
8107 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8108     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8109     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8110     struct stat st;
8111 #endif
8112 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8113     || defined(TARGET_NR_fstatfs)
8114     struct statfs stfs;
8115 #endif
8116     void *p;
8117 
8118     switch(num) {
8119     case TARGET_NR_exit:
8120         /* In old applications this may be used to implement _exit(2).
8121            However in threaded applications it is used for thread termination,
8122            and _exit_group is used for application termination.
8123            Do thread termination if we have more than one thread.  */
8124 
8125         if (block_signals()) {
8126             return -TARGET_ERESTARTSYS;
8127         }
8128 
8129         cpu_list_lock();
8130 
8131         if (CPU_NEXT(first_cpu)) {
8132             TaskState *ts;
8133 
8134             /* Remove the CPU from the list.  */
8135             QTAILQ_REMOVE(&cpus, cpu, node);
8136 
8137             cpu_list_unlock();
8138 
8139             ts = cpu->opaque;
8140             if (ts->child_tidptr) {
8141                 put_user_u32(0, ts->child_tidptr);
8142                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8143                           NULL, NULL, 0);
8144             }
8145             thread_cpu = NULL;
8146             object_unref(OBJECT(cpu));
8147             g_free(ts);
8148             rcu_unregister_thread();
8149             pthread_exit(NULL);
8150         }
8151 
8152         cpu_list_unlock();
8153         preexit_cleanup(cpu_env, arg1);
8154         _exit(arg1);
8155         return 0; /* avoid warning */
8156     case TARGET_NR_read:
8157         if (arg3 == 0) {
8158             return 0;
8159         } else {
8160             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8161                 return -TARGET_EFAULT;
8162             ret = get_errno(safe_read(arg1, p, arg3));
8163             if (ret >= 0 &&
8164                 fd_trans_host_to_target_data(arg1)) {
8165                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8166             }
8167             unlock_user(p, arg2, ret);
8168         }
8169         return ret;
8170     case TARGET_NR_write:
8171         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8172             return -TARGET_EFAULT;
8173         if (fd_trans_target_to_host_data(arg1)) {
8174             void *copy = g_malloc(arg3);
8175             memcpy(copy, p, arg3);
8176             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8177             if (ret >= 0) {
8178                 ret = get_errno(safe_write(arg1, copy, ret));
8179             }
8180             g_free(copy);
8181         } else {
8182             ret = get_errno(safe_write(arg1, p, arg3));
8183         }
8184         unlock_user(p, arg2, 0);
8185         return ret;
8186 
8187 #ifdef TARGET_NR_open
8188     case TARGET_NR_open:
8189         if (!(p = lock_user_string(arg1)))
8190             return -TARGET_EFAULT;
8191         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8192                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8193                                   arg3));
8194         fd_trans_unregister(ret);
8195         unlock_user(p, arg1, 0);
8196         return ret;
8197 #endif
8198     case TARGET_NR_openat:
8199         if (!(p = lock_user_string(arg2)))
8200             return -TARGET_EFAULT;
8201         ret = get_errno(do_openat(cpu_env, arg1, p,
8202                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8203                                   arg4));
8204         fd_trans_unregister(ret);
8205         unlock_user(p, arg2, 0);
8206         return ret;
8207 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8208     case TARGET_NR_name_to_handle_at:
8209         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8210         return ret;
8211 #endif
8212 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8213     case TARGET_NR_open_by_handle_at:
8214         ret = do_open_by_handle_at(arg1, arg2, arg3);
8215         fd_trans_unregister(ret);
8216         return ret;
8217 #endif
8218     case TARGET_NR_close:
8219         fd_trans_unregister(arg1);
8220         return get_errno(close(arg1));
8221 
8222     case TARGET_NR_brk:
8223         return do_brk(arg1);
8224 #ifdef TARGET_NR_fork
8225     case TARGET_NR_fork:
8226         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8227 #endif
8228 #ifdef TARGET_NR_waitpid
8229     case TARGET_NR_waitpid:
8230         {
8231             int status;
8232             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8233             if (!is_error(ret) && arg2 && ret
8234                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8235                 return -TARGET_EFAULT;
8236         }
8237         return ret;
8238 #endif
8239 #ifdef TARGET_NR_waitid
8240     case TARGET_NR_waitid:
8241         {
8242             siginfo_t info;
8243             info.si_pid = 0;
8244             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8245             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8246                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8247                     return -TARGET_EFAULT;
8248                 host_to_target_siginfo(p, &info);
8249                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8250             }
8251         }
8252         return ret;
8253 #endif
8254 #ifdef TARGET_NR_creat /* not on alpha */
8255     case TARGET_NR_creat:
8256         if (!(p = lock_user_string(arg1)))
8257             return -TARGET_EFAULT;
8258         ret = get_errno(creat(p, arg2));
8259         fd_trans_unregister(ret);
8260         unlock_user(p, arg1, 0);
8261         return ret;
8262 #endif
8263 #ifdef TARGET_NR_link
8264     case TARGET_NR_link:
8265         {
8266             void * p2;
8267             p = lock_user_string(arg1);
8268             p2 = lock_user_string(arg2);
8269             if (!p || !p2)
8270                 ret = -TARGET_EFAULT;
8271             else
8272                 ret = get_errno(link(p, p2));
8273             unlock_user(p2, arg2, 0);
8274             unlock_user(p, arg1, 0);
8275         }
8276         return ret;
8277 #endif
8278 #if defined(TARGET_NR_linkat)
8279     case TARGET_NR_linkat:
8280         {
8281             void * p2 = NULL;
8282             if (!arg2 || !arg4)
8283                 return -TARGET_EFAULT;
8284             p  = lock_user_string(arg2);
8285             p2 = lock_user_string(arg4);
8286             if (!p || !p2)
8287                 ret = -TARGET_EFAULT;
8288             else
8289                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8290             unlock_user(p, arg2, 0);
8291             unlock_user(p2, arg4, 0);
8292         }
8293         return ret;
8294 #endif
8295 #ifdef TARGET_NR_unlink
8296     case TARGET_NR_unlink:
8297         if (!(p = lock_user_string(arg1)))
8298             return -TARGET_EFAULT;
8299         ret = get_errno(unlink(p));
8300         unlock_user(p, arg1, 0);
8301         return ret;
8302 #endif
8303 #if defined(TARGET_NR_unlinkat)
8304     case TARGET_NR_unlinkat:
8305         if (!(p = lock_user_string(arg2)))
8306             return -TARGET_EFAULT;
8307         ret = get_errno(unlinkat(arg1, p, arg3));
8308         unlock_user(p, arg2, 0);
8309         return ret;
8310 #endif
8311     case TARGET_NR_execve:
8312         {
8313             char **argp, **envp;
8314             int argc, envc;
8315             abi_ulong gp;
8316             abi_ulong guest_argp;
8317             abi_ulong guest_envp;
8318             abi_ulong addr;
8319             char **q;
8320             int total_size = 0;
8321 
8322             argc = 0;
8323             guest_argp = arg2;
8324             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8325                 if (get_user_ual(addr, gp))
8326                     return -TARGET_EFAULT;
8327                 if (!addr)
8328                     break;
8329                 argc++;
8330             }
8331             envc = 0;
8332             guest_envp = arg3;
8333             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8334                 if (get_user_ual(addr, gp))
8335                     return -TARGET_EFAULT;
8336                 if (!addr)
8337                     break;
8338                 envc++;
8339             }
8340 
8341             argp = g_new0(char *, argc + 1);
8342             envp = g_new0(char *, envc + 1);
8343 
8344             for (gp = guest_argp, q = argp; gp;
8345                   gp += sizeof(abi_ulong), q++) {
8346                 if (get_user_ual(addr, gp))
8347                     goto execve_efault;
8348                 if (!addr)
8349                     break;
8350                 if (!(*q = lock_user_string(addr)))
8351                     goto execve_efault;
8352                 total_size += strlen(*q) + 1;
8353             }
8354             *q = NULL;
8355 
8356             for (gp = guest_envp, q = envp; gp;
8357                   gp += sizeof(abi_ulong), q++) {
8358                 if (get_user_ual(addr, gp))
8359                     goto execve_efault;
8360                 if (!addr)
8361                     break;
8362                 if (!(*q = lock_user_string(addr)))
8363                     goto execve_efault;
8364                 total_size += strlen(*q) + 1;
8365             }
8366             *q = NULL;
8367 
8368             if (!(p = lock_user_string(arg1)))
8369                 goto execve_efault;
8370             /* Although execve() is not an interruptible syscall it is
8371              * a special case where we must use the safe_syscall wrapper:
8372              * if we allow a signal to happen before we make the host
8373              * syscall then we will 'lose' it, because at the point of
8374              * execve the process leaves QEMU's control. So we use the
8375              * safe syscall wrapper to ensure that we either take the
8376              * signal as a guest signal, or else it does not happen
8377              * before the execve completes and makes it the other
8378              * program's problem.
8379              */
8380             ret = get_errno(safe_execve(p, argp, envp));
8381             unlock_user(p, arg1, 0);
8382 
8383             goto execve_end;
8384 
8385         execve_efault:
8386             ret = -TARGET_EFAULT;
8387 
8388         execve_end:
8389             for (gp = guest_argp, q = argp; *q;
8390                   gp += sizeof(abi_ulong), q++) {
8391                 if (get_user_ual(addr, gp)
8392                     || !addr)
8393                     break;
8394                 unlock_user(*q, addr, 0);
8395             }
8396             for (gp = guest_envp, q = envp; *q;
8397                   gp += sizeof(abi_ulong), q++) {
8398                 if (get_user_ual(addr, gp)
8399                     || !addr)
8400                     break;
8401                 unlock_user(*q, addr, 0);
8402             }
8403 
8404             g_free(argp);
8405             g_free(envp);
8406         }
8407         return ret;
8408     case TARGET_NR_chdir:
8409         if (!(p = lock_user_string(arg1)))
8410             return -TARGET_EFAULT;
8411         ret = get_errno(chdir(p));
8412         unlock_user(p, arg1, 0);
8413         return ret;
8414 #ifdef TARGET_NR_time
8415     case TARGET_NR_time:
8416         {
8417             time_t host_time;
8418             ret = get_errno(time(&host_time));
8419             if (!is_error(ret)
8420                 && arg1
8421                 && put_user_sal(host_time, arg1))
8422                 return -TARGET_EFAULT;
8423         }
8424         return ret;
8425 #endif
8426 #ifdef TARGET_NR_mknod
8427     case TARGET_NR_mknod:
8428         if (!(p = lock_user_string(arg1)))
8429             return -TARGET_EFAULT;
8430         ret = get_errno(mknod(p, arg2, arg3));
8431         unlock_user(p, arg1, 0);
8432         return ret;
8433 #endif
8434 #if defined(TARGET_NR_mknodat)
8435     case TARGET_NR_mknodat:
8436         if (!(p = lock_user_string(arg2)))
8437             return -TARGET_EFAULT;
8438         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8439         unlock_user(p, arg2, 0);
8440         return ret;
8441 #endif
8442 #ifdef TARGET_NR_chmod
8443     case TARGET_NR_chmod:
8444         if (!(p = lock_user_string(arg1)))
8445             return -TARGET_EFAULT;
8446         ret = get_errno(chmod(p, arg2));
8447         unlock_user(p, arg1, 0);
8448         return ret;
8449 #endif
8450 #ifdef TARGET_NR_break
8451     case TARGET_NR_break:
8452         goto unimplemented;
8453 #endif
8454 #ifdef TARGET_NR_oldstat
8455     case TARGET_NR_oldstat:
8456         goto unimplemented;
8457 #endif
8458 #ifdef TARGET_NR_lseek
8459     case TARGET_NR_lseek:
8460         return get_errno(lseek(arg1, arg2, arg3));
8461 #endif
8462 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8463     /* Alpha specific */
8464     case TARGET_NR_getxpid:
8465         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8466         return get_errno(getpid());
8467 #endif
8468 #ifdef TARGET_NR_getpid
8469     case TARGET_NR_getpid:
8470         return get_errno(getpid());
8471 #endif
8472     case TARGET_NR_mount:
8473         {
8474             /* need to look at the data field */
8475             void *p2, *p3;
8476 
8477             if (arg1) {
8478                 p = lock_user_string(arg1);
8479                 if (!p) {
8480                     return -TARGET_EFAULT;
8481                 }
8482             } else {
8483                 p = NULL;
8484             }
8485 
8486             p2 = lock_user_string(arg2);
8487             if (!p2) {
8488                 if (arg1) {
8489                     unlock_user(p, arg1, 0);
8490                 }
8491                 return -TARGET_EFAULT;
8492             }
8493 
8494             if (arg3) {
8495                 p3 = lock_user_string(arg3);
8496                 if (!p3) {
8497                     if (arg1) {
8498                         unlock_user(p, arg1, 0);
8499                     }
8500                     unlock_user(p2, arg2, 0);
8501                     return -TARGET_EFAULT;
8502                 }
8503             } else {
8504                 p3 = NULL;
8505             }
8506 
8507             /* FIXME - arg5 should be locked, but it isn't clear how to
8508              * do that since it's not guaranteed to be a NULL-terminated
8509              * string.
8510              */
8511             if (!arg5) {
8512                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8513             } else {
8514                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8515             }
8516             ret = get_errno(ret);
8517 
8518             if (arg1) {
8519                 unlock_user(p, arg1, 0);
8520             }
8521             unlock_user(p2, arg2, 0);
8522             if (arg3) {
8523                 unlock_user(p3, arg3, 0);
8524             }
8525         }
8526         return ret;
8527 #ifdef TARGET_NR_umount
8528     case TARGET_NR_umount:
8529         if (!(p = lock_user_string(arg1)))
8530             return -TARGET_EFAULT;
8531         ret = get_errno(umount(p));
8532         unlock_user(p, arg1, 0);
8533         return ret;
8534 #endif
8535 #ifdef TARGET_NR_stime /* not on alpha */
8536     case TARGET_NR_stime:
8537         {
8538             time_t host_time;
8539             if (get_user_sal(host_time, arg1))
8540                 return -TARGET_EFAULT;
8541             return get_errno(stime(&host_time));
8542         }
8543 #endif
8544     case TARGET_NR_ptrace:
8545         goto unimplemented;
8546 #ifdef TARGET_NR_alarm /* not on alpha */
8547     case TARGET_NR_alarm:
8548         return alarm(arg1);
8549 #endif
8550 #ifdef TARGET_NR_oldfstat
8551     case TARGET_NR_oldfstat:
8552         goto unimplemented;
8553 #endif
8554 #ifdef TARGET_NR_pause /* not on alpha */
8555     case TARGET_NR_pause:
8556         if (!block_signals()) {
8557             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8558         }
8559         return -TARGET_EINTR;
8560 #endif
8561 #ifdef TARGET_NR_utime
8562     case TARGET_NR_utime:
8563         {
8564             struct utimbuf tbuf, *host_tbuf;
8565             struct target_utimbuf *target_tbuf;
8566             if (arg2) {
8567                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8568                     return -TARGET_EFAULT;
8569                 tbuf.actime = tswapal(target_tbuf->actime);
8570                 tbuf.modtime = tswapal(target_tbuf->modtime);
8571                 unlock_user_struct(target_tbuf, arg2, 0);
8572                 host_tbuf = &tbuf;
8573             } else {
8574                 host_tbuf = NULL;
8575             }
8576             if (!(p = lock_user_string(arg1)))
8577                 return -TARGET_EFAULT;
8578             ret = get_errno(utime(p, host_tbuf));
8579             unlock_user(p, arg1, 0);
8580         }
8581         return ret;
8582 #endif
8583 #ifdef TARGET_NR_utimes
8584     case TARGET_NR_utimes:
8585         {
8586             struct timeval *tvp, tv[2];
8587             if (arg2) {
8588                 if (copy_from_user_timeval(&tv[0], arg2)
8589                     || copy_from_user_timeval(&tv[1],
8590                                               arg2 + sizeof(struct target_timeval)))
8591                     return -TARGET_EFAULT;
8592                 tvp = tv;
8593             } else {
8594                 tvp = NULL;
8595             }
8596             if (!(p = lock_user_string(arg1)))
8597                 return -TARGET_EFAULT;
8598             ret = get_errno(utimes(p, tvp));
8599             unlock_user(p, arg1, 0);
8600         }
8601         return ret;
8602 #endif
8603 #if defined(TARGET_NR_futimesat)
8604     case TARGET_NR_futimesat:
8605         {
8606             struct timeval *tvp, tv[2];
8607             if (arg3) {
8608                 if (copy_from_user_timeval(&tv[0], arg3)
8609                     || copy_from_user_timeval(&tv[1],
8610                                               arg3 + sizeof(struct target_timeval)))
8611                     return -TARGET_EFAULT;
8612                 tvp = tv;
8613             } else {
8614                 tvp = NULL;
8615             }
8616             if (!(p = lock_user_string(arg2))) {
8617                 return -TARGET_EFAULT;
8618             }
8619             ret = get_errno(futimesat(arg1, path(p), tvp));
8620             unlock_user(p, arg2, 0);
8621         }
8622         return ret;
8623 #endif
8624 #ifdef TARGET_NR_stty
8625     case TARGET_NR_stty:
8626         goto unimplemented;
8627 #endif
8628 #ifdef TARGET_NR_gtty
8629     case TARGET_NR_gtty:
8630         goto unimplemented;
8631 #endif
8632 #ifdef TARGET_NR_access
8633     case TARGET_NR_access:
8634         if (!(p = lock_user_string(arg1))) {
8635             return -TARGET_EFAULT;
8636         }
8637         ret = get_errno(access(path(p), arg2));
8638         unlock_user(p, arg1, 0);
8639         return ret;
8640 #endif
8641 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8642     case TARGET_NR_faccessat:
8643         if (!(p = lock_user_string(arg2))) {
8644             return -TARGET_EFAULT;
8645         }
8646         ret = get_errno(faccessat(arg1, p, arg3, 0));
8647         unlock_user(p, arg2, 0);
8648         return ret;
8649 #endif
8650 #ifdef TARGET_NR_nice /* not on alpha */
8651     case TARGET_NR_nice:
8652         return get_errno(nice(arg1));
8653 #endif
8654 #ifdef TARGET_NR_ftime
8655     case TARGET_NR_ftime:
8656         goto unimplemented;
8657 #endif
8658     case TARGET_NR_sync:
8659         sync();
8660         return 0;
8661 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8662     case TARGET_NR_syncfs:
8663         return get_errno(syncfs(arg1));
8664 #endif
8665     case TARGET_NR_kill:
8666         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8667 #ifdef TARGET_NR_rename
8668     case TARGET_NR_rename:
8669         {
8670             void *p2;
8671             p = lock_user_string(arg1);
8672             p2 = lock_user_string(arg2);
8673             if (!p || !p2)
8674                 ret = -TARGET_EFAULT;
8675             else
8676                 ret = get_errno(rename(p, p2));
8677             unlock_user(p2, arg2, 0);
8678             unlock_user(p, arg1, 0);
8679         }
8680         return ret;
8681 #endif
8682 #if defined(TARGET_NR_renameat)
8683     case TARGET_NR_renameat:
8684         {
8685             void *p2;
8686             p  = lock_user_string(arg2);
8687             p2 = lock_user_string(arg4);
8688             if (!p || !p2)
8689                 ret = -TARGET_EFAULT;
8690             else
8691                 ret = get_errno(renameat(arg1, p, arg3, p2));
8692             unlock_user(p2, arg4, 0);
8693             unlock_user(p, arg2, 0);
8694         }
8695         return ret;
8696 #endif
8697 #if defined(TARGET_NR_renameat2)
8698     case TARGET_NR_renameat2:
8699         {
8700             void *p2;
8701             p  = lock_user_string(arg2);
8702             p2 = lock_user_string(arg4);
8703             if (!p || !p2) {
8704                 ret = -TARGET_EFAULT;
8705             } else {
8706                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8707             }
8708             unlock_user(p2, arg4, 0);
8709             unlock_user(p, arg2, 0);
8710         }
8711         return ret;
8712 #endif
8713 #ifdef TARGET_NR_mkdir
8714     case TARGET_NR_mkdir:
8715         if (!(p = lock_user_string(arg1)))
8716             return -TARGET_EFAULT;
8717         ret = get_errno(mkdir(p, arg2));
8718         unlock_user(p, arg1, 0);
8719         return ret;
8720 #endif
8721 #if defined(TARGET_NR_mkdirat)
8722     case TARGET_NR_mkdirat:
8723         if (!(p = lock_user_string(arg2)))
8724             return -TARGET_EFAULT;
8725         ret = get_errno(mkdirat(arg1, p, arg3));
8726         unlock_user(p, arg2, 0);
8727         return ret;
8728 #endif
8729 #ifdef TARGET_NR_rmdir
8730     case TARGET_NR_rmdir:
8731         if (!(p = lock_user_string(arg1)))
8732             return -TARGET_EFAULT;
8733         ret = get_errno(rmdir(p));
8734         unlock_user(p, arg1, 0);
8735         return ret;
8736 #endif
8737     case TARGET_NR_dup:
8738         ret = get_errno(dup(arg1));
8739         if (ret >= 0) {
8740             fd_trans_dup(arg1, ret);
8741         }
8742         return ret;
8743 #ifdef TARGET_NR_pipe
8744     case TARGET_NR_pipe:
8745         return do_pipe(cpu_env, arg1, 0, 0);
8746 #endif
8747 #ifdef TARGET_NR_pipe2
8748     case TARGET_NR_pipe2:
8749         return do_pipe(cpu_env, arg1,
8750                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8751 #endif
8752     case TARGET_NR_times:
8753         {
8754             struct target_tms *tmsp;
8755             struct tms tms;
8756             ret = get_errno(times(&tms));
8757             if (arg1) {
8758                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8759                 if (!tmsp)
8760                     return -TARGET_EFAULT;
8761                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8762                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8763                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8764                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8765             }
8766             if (!is_error(ret))
8767                 ret = host_to_target_clock_t(ret);
8768         }
8769         return ret;
8770 #ifdef TARGET_NR_prof
8771     case TARGET_NR_prof:
8772         goto unimplemented;
8773 #endif
8774 #ifdef TARGET_NR_signal
8775     case TARGET_NR_signal:
8776         goto unimplemented;
8777 #endif
8778     case TARGET_NR_acct:
8779         if (arg1 == 0) {
8780             ret = get_errno(acct(NULL));
8781         } else {
8782             if (!(p = lock_user_string(arg1))) {
8783                 return -TARGET_EFAULT;
8784             }
8785             ret = get_errno(acct(path(p)));
8786             unlock_user(p, arg1, 0);
8787         }
8788         return ret;
8789 #ifdef TARGET_NR_umount2
8790     case TARGET_NR_umount2:
8791         if (!(p = lock_user_string(arg1)))
8792             return -TARGET_EFAULT;
8793         ret = get_errno(umount2(p, arg2));
8794         unlock_user(p, arg1, 0);
8795         return ret;
8796 #endif
8797 #ifdef TARGET_NR_lock
8798     case TARGET_NR_lock:
8799         goto unimplemented;
8800 #endif
8801     case TARGET_NR_ioctl:
8802         return do_ioctl(arg1, arg2, arg3);
8803 #ifdef TARGET_NR_fcntl
8804     case TARGET_NR_fcntl:
8805         return do_fcntl(arg1, arg2, arg3);
8806 #endif
8807 #ifdef TARGET_NR_mpx
8808     case TARGET_NR_mpx:
8809         goto unimplemented;
8810 #endif
8811     case TARGET_NR_setpgid:
8812         return get_errno(setpgid(arg1, arg2));
8813 #ifdef TARGET_NR_ulimit
8814     case TARGET_NR_ulimit:
8815         goto unimplemented;
8816 #endif
8817 #ifdef TARGET_NR_oldolduname
8818     case TARGET_NR_oldolduname:
8819         goto unimplemented;
8820 #endif
8821     case TARGET_NR_umask:
8822         return get_errno(umask(arg1));
8823     case TARGET_NR_chroot:
8824         if (!(p = lock_user_string(arg1)))
8825             return -TARGET_EFAULT;
8826         ret = get_errno(chroot(p));
8827         unlock_user(p, arg1, 0);
8828         return ret;
8829 #ifdef TARGET_NR_ustat
8830     case TARGET_NR_ustat:
8831         goto unimplemented;
8832 #endif
8833 #ifdef TARGET_NR_dup2
8834     case TARGET_NR_dup2:
8835         ret = get_errno(dup2(arg1, arg2));
8836         if (ret >= 0) {
8837             fd_trans_dup(arg1, arg2);
8838         }
8839         return ret;
8840 #endif
8841 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8842     case TARGET_NR_dup3:
8843     {
8844         int host_flags;
8845 
8846         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8847             return -TARGET_EINVAL;
8848         }
8849         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8850         ret = get_errno(dup3(arg1, arg2, host_flags));
8851         if (ret >= 0) {
8852             fd_trans_dup(arg1, arg2);
8853         }
8854         return ret;
8855     }
8856 #endif
8857 #ifdef TARGET_NR_getppid /* not on alpha */
8858     case TARGET_NR_getppid:
8859         return get_errno(getppid());
8860 #endif
8861 #ifdef TARGET_NR_getpgrp
8862     case TARGET_NR_getpgrp:
8863         return get_errno(getpgrp());
8864 #endif
8865     case TARGET_NR_setsid:
8866         return get_errno(setsid());
8867 #ifdef TARGET_NR_sigaction
8868     case TARGET_NR_sigaction:
8869         {
8870 #if defined(TARGET_ALPHA)
8871             struct target_sigaction act, oact, *pact = 0;
8872             struct target_old_sigaction *old_act;
8873             if (arg2) {
8874                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8875                     return -TARGET_EFAULT;
8876                 act._sa_handler = old_act->_sa_handler;
8877                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8878                 act.sa_flags = old_act->sa_flags;
8879                 act.sa_restorer = 0;
8880                 unlock_user_struct(old_act, arg2, 0);
8881                 pact = &act;
8882             }
8883             ret = get_errno(do_sigaction(arg1, pact, &oact));
8884             if (!is_error(ret) && arg3) {
8885                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8886                     return -TARGET_EFAULT;
8887                 old_act->_sa_handler = oact._sa_handler;
8888                 old_act->sa_mask = oact.sa_mask.sig[0];
8889                 old_act->sa_flags = oact.sa_flags;
8890                 unlock_user_struct(old_act, arg3, 1);
8891             }
8892 #elif defined(TARGET_MIPS)
8893 	    struct target_sigaction act, oact, *pact, *old_act;
8894 
8895 	    if (arg2) {
8896                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8897                     return -TARGET_EFAULT;
8898 		act._sa_handler = old_act->_sa_handler;
8899 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8900 		act.sa_flags = old_act->sa_flags;
8901 		unlock_user_struct(old_act, arg2, 0);
8902 		pact = &act;
8903 	    } else {
8904 		pact = NULL;
8905 	    }
8906 
8907 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8908 
8909 	    if (!is_error(ret) && arg3) {
8910                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8911                     return -TARGET_EFAULT;
8912 		old_act->_sa_handler = oact._sa_handler;
8913 		old_act->sa_flags = oact.sa_flags;
8914 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8915 		old_act->sa_mask.sig[1] = 0;
8916 		old_act->sa_mask.sig[2] = 0;
8917 		old_act->sa_mask.sig[3] = 0;
8918 		unlock_user_struct(old_act, arg3, 1);
8919 	    }
8920 #else
8921             struct target_old_sigaction *old_act;
8922             struct target_sigaction act, oact, *pact;
8923             if (arg2) {
8924                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8925                     return -TARGET_EFAULT;
8926                 act._sa_handler = old_act->_sa_handler;
8927                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8928                 act.sa_flags = old_act->sa_flags;
8929                 act.sa_restorer = old_act->sa_restorer;
8930 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8931                 act.ka_restorer = 0;
8932 #endif
8933                 unlock_user_struct(old_act, arg2, 0);
8934                 pact = &act;
8935             } else {
8936                 pact = NULL;
8937             }
8938             ret = get_errno(do_sigaction(arg1, pact, &oact));
8939             if (!is_error(ret) && arg3) {
8940                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8941                     return -TARGET_EFAULT;
8942                 old_act->_sa_handler = oact._sa_handler;
8943                 old_act->sa_mask = oact.sa_mask.sig[0];
8944                 old_act->sa_flags = oact.sa_flags;
8945                 old_act->sa_restorer = oact.sa_restorer;
8946                 unlock_user_struct(old_act, arg3, 1);
8947             }
8948 #endif
8949         }
8950         return ret;
8951 #endif
8952     case TARGET_NR_rt_sigaction:
8953         {
8954 #if defined(TARGET_ALPHA)
8955             /* For Alpha and SPARC this is a 5 argument syscall, with
8956              * a 'restorer' parameter which must be copied into the
8957              * sa_restorer field of the sigaction struct.
8958              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8959              * and arg5 is the sigsetsize.
8960              * Alpha also has a separate rt_sigaction struct that it uses
8961              * here; SPARC uses the usual sigaction struct.
8962              */
8963             struct target_rt_sigaction *rt_act;
8964             struct target_sigaction act, oact, *pact = 0;
8965 
8966             if (arg4 != sizeof(target_sigset_t)) {
8967                 return -TARGET_EINVAL;
8968             }
8969             if (arg2) {
8970                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8971                     return -TARGET_EFAULT;
8972                 act._sa_handler = rt_act->_sa_handler;
8973                 act.sa_mask = rt_act->sa_mask;
8974                 act.sa_flags = rt_act->sa_flags;
8975                 act.sa_restorer = arg5;
8976                 unlock_user_struct(rt_act, arg2, 0);
8977                 pact = &act;
8978             }
8979             ret = get_errno(do_sigaction(arg1, pact, &oact));
8980             if (!is_error(ret) && arg3) {
8981                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8982                     return -TARGET_EFAULT;
8983                 rt_act->_sa_handler = oact._sa_handler;
8984                 rt_act->sa_mask = oact.sa_mask;
8985                 rt_act->sa_flags = oact.sa_flags;
8986                 unlock_user_struct(rt_act, arg3, 1);
8987             }
8988 #else
8989 #ifdef TARGET_SPARC
8990             target_ulong restorer = arg4;
8991             target_ulong sigsetsize = arg5;
8992 #else
8993             target_ulong sigsetsize = arg4;
8994 #endif
8995             struct target_sigaction *act;
8996             struct target_sigaction *oact;
8997 
8998             if (sigsetsize != sizeof(target_sigset_t)) {
8999                 return -TARGET_EINVAL;
9000             }
9001             if (arg2) {
9002                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9003                     return -TARGET_EFAULT;
9004                 }
9005 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9006                 act->ka_restorer = restorer;
9007 #endif
9008             } else {
9009                 act = NULL;
9010             }
9011             if (arg3) {
9012                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9013                     ret = -TARGET_EFAULT;
9014                     goto rt_sigaction_fail;
9015                 }
9016             } else
9017                 oact = NULL;
9018             ret = get_errno(do_sigaction(arg1, act, oact));
9019 	rt_sigaction_fail:
9020             if (act)
9021                 unlock_user_struct(act, arg2, 0);
9022             if (oact)
9023                 unlock_user_struct(oact, arg3, 1);
9024 #endif
9025         }
9026         return ret;
9027 #ifdef TARGET_NR_sgetmask /* not on alpha */
9028     case TARGET_NR_sgetmask:
9029         {
9030             sigset_t cur_set;
9031             abi_ulong target_set;
9032             ret = do_sigprocmask(0, NULL, &cur_set);
9033             if (!ret) {
9034                 host_to_target_old_sigset(&target_set, &cur_set);
9035                 ret = target_set;
9036             }
9037         }
9038         return ret;
9039 #endif
9040 #ifdef TARGET_NR_ssetmask /* not on alpha */
9041     case TARGET_NR_ssetmask:
9042         {
9043             sigset_t set, oset;
9044             abi_ulong target_set = arg1;
9045             target_to_host_old_sigset(&set, &target_set);
9046             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9047             if (!ret) {
9048                 host_to_target_old_sigset(&target_set, &oset);
9049                 ret = target_set;
9050             }
9051         }
9052         return ret;
9053 #endif
9054 #ifdef TARGET_NR_sigprocmask
9055     case TARGET_NR_sigprocmask:
9056         {
9057 #if defined(TARGET_ALPHA)
9058             sigset_t set, oldset;
9059             abi_ulong mask;
9060             int how;
9061 
9062             switch (arg1) {
9063             case TARGET_SIG_BLOCK:
9064                 how = SIG_BLOCK;
9065                 break;
9066             case TARGET_SIG_UNBLOCK:
9067                 how = SIG_UNBLOCK;
9068                 break;
9069             case TARGET_SIG_SETMASK:
9070                 how = SIG_SETMASK;
9071                 break;
9072             default:
9073                 ret = -TARGET_EINVAL;
9074                 goto fail;
9075             }
9076             mask = arg2;
9077             target_to_host_old_sigset(&set, &mask);
9078 
9079             ret = do_sigprocmask(how, &set, &oldset);
9080             if (!is_error(ret)) {
9081                 host_to_target_old_sigset(&mask, &oldset);
9082                 ret = mask;
9083                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9084             }
9085 #else
9086             sigset_t set, oldset, *set_ptr;
9087             int how;
9088 
9089             if (arg2) {
9090                 switch (arg1) {
9091                 case TARGET_SIG_BLOCK:
9092                     how = SIG_BLOCK;
9093                     break;
9094                 case TARGET_SIG_UNBLOCK:
9095                     how = SIG_UNBLOCK;
9096                     break;
9097                 case TARGET_SIG_SETMASK:
9098                     how = SIG_SETMASK;
9099                     break;
9100                 default:
9101                     ret = -TARGET_EINVAL;
9102                     goto fail;
9103                 }
9104                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9105                     return -TARGET_EFAULT;
9106                 target_to_host_old_sigset(&set, p);
9107                 unlock_user(p, arg2, 0);
9108                 set_ptr = &set;
9109             } else {
9110                 how = 0;
9111                 set_ptr = NULL;
9112             }
9113             ret = do_sigprocmask(how, set_ptr, &oldset);
9114             if (!is_error(ret) && arg3) {
9115                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9116                     return -TARGET_EFAULT;
9117                 host_to_target_old_sigset(p, &oldset);
9118                 unlock_user(p, arg3, sizeof(target_sigset_t));
9119             }
9120 #endif
9121         }
9122         return ret;
9123 #endif
9124     case TARGET_NR_rt_sigprocmask:
9125         {
9126             int how = arg1;
9127             sigset_t set, oldset, *set_ptr;
9128 
9129             if (arg4 != sizeof(target_sigset_t)) {
9130                 return -TARGET_EINVAL;
9131             }
9132 
9133             if (arg2) {
9134                 switch(how) {
9135                 case TARGET_SIG_BLOCK:
9136                     how = SIG_BLOCK;
9137                     break;
9138                 case TARGET_SIG_UNBLOCK:
9139                     how = SIG_UNBLOCK;
9140                     break;
9141                 case TARGET_SIG_SETMASK:
9142                     how = SIG_SETMASK;
9143                     break;
9144                 default:
9145                     ret = -TARGET_EINVAL;
9146                     goto fail;
9147                 }
9148                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9149                     return -TARGET_EFAULT;
9150                 target_to_host_sigset(&set, p);
9151                 unlock_user(p, arg2, 0);
9152                 set_ptr = &set;
9153             } else {
9154                 how = 0;
9155                 set_ptr = NULL;
9156             }
9157             ret = do_sigprocmask(how, set_ptr, &oldset);
9158             if (!is_error(ret) && arg3) {
9159                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9160                     return -TARGET_EFAULT;
9161                 host_to_target_sigset(p, &oldset);
9162                 unlock_user(p, arg3, sizeof(target_sigset_t));
9163             }
9164         }
9165         return ret;
9166 #ifdef TARGET_NR_sigpending
9167     case TARGET_NR_sigpending:
9168         {
9169             sigset_t set;
9170             ret = get_errno(sigpending(&set));
9171             if (!is_error(ret)) {
9172                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9173                     return -TARGET_EFAULT;
9174                 host_to_target_old_sigset(p, &set);
9175                 unlock_user(p, arg1, sizeof(target_sigset_t));
9176             }
9177         }
9178         return ret;
9179 #endif
9180     case TARGET_NR_rt_sigpending:
9181         {
9182             sigset_t set;
9183 
9184             /* Yes, this check is >, not != like most others. We follow the
9185              * kernel's logic here: it implements NR_sigpending through the
9186              * same code path, and in that case the old_sigset_t is smaller
9187              * in size.
9188              */
9189             if (arg2 > sizeof(target_sigset_t)) {
9190                 return -TARGET_EINVAL;
9191             }
9192 
9193             ret = get_errno(sigpending(&set));
9194             if (!is_error(ret)) {
9195                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9196                     return -TARGET_EFAULT;
9197                 host_to_target_sigset(p, &set);
9198                 unlock_user(p, arg1, sizeof(target_sigset_t));
9199             }
9200         }
9201         return ret;
9202 #ifdef TARGET_NR_sigsuspend
9203     case TARGET_NR_sigsuspend:
9204         {
9205             TaskState *ts = cpu->opaque;
9206 #if defined(TARGET_ALPHA)
9207             abi_ulong mask = arg1;
9208             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9209 #else
9210             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9211                 return -TARGET_EFAULT;
9212             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9213             unlock_user(p, arg1, 0);
9214 #endif
9215             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9216                                                SIGSET_T_SIZE));
9217             if (ret != -TARGET_ERESTARTSYS) {
9218                 ts->in_sigsuspend = 1;
9219             }
9220         }
9221         return ret;
9222 #endif
9223     case TARGET_NR_rt_sigsuspend:
9224         {
9225             TaskState *ts = cpu->opaque;
9226 
9227             if (arg2 != sizeof(target_sigset_t)) {
9228                 return -TARGET_EINVAL;
9229             }
9230             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9231                 return -TARGET_EFAULT;
9232             target_to_host_sigset(&ts->sigsuspend_mask, p);
9233             unlock_user(p, arg1, 0);
9234             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9235                                                SIGSET_T_SIZE));
9236             if (ret != -TARGET_ERESTARTSYS) {
9237                 ts->in_sigsuspend = 1;
9238             }
9239         }
9240         return ret;
9241     case TARGET_NR_rt_sigtimedwait:
9242         {
9243             sigset_t set;
9244             struct timespec uts, *puts;
9245             siginfo_t uinfo;
9246 
9247             if (arg4 != sizeof(target_sigset_t)) {
9248                 return -TARGET_EINVAL;
9249             }
9250 
9251             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9252                 return -TARGET_EFAULT;
9253             target_to_host_sigset(&set, p);
9254             unlock_user(p, arg1, 0);
9255             if (arg3) {
9256                 puts = &uts;
9257                 target_to_host_timespec(puts, arg3);
9258             } else {
9259                 puts = NULL;
9260             }
9261             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9262                                                  SIGSET_T_SIZE));
9263             if (!is_error(ret)) {
9264                 if (arg2) {
9265                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9266                                   0);
9267                     if (!p) {
9268                         return -TARGET_EFAULT;
9269                     }
9270                     host_to_target_siginfo(p, &uinfo);
9271                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9272                 }
9273                 ret = host_to_target_signal(ret);
9274             }
9275         }
9276         return ret;
9277     case TARGET_NR_rt_sigqueueinfo:
9278         {
9279             siginfo_t uinfo;
9280 
9281             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9282             if (!p) {
9283                 return -TARGET_EFAULT;
9284             }
9285             target_to_host_siginfo(&uinfo, p);
9286             unlock_user(p, arg3, 0);
9287             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9288         }
9289         return ret;
9290     case TARGET_NR_rt_tgsigqueueinfo:
9291         {
9292             siginfo_t uinfo;
9293 
9294             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9295             if (!p) {
9296                 return -TARGET_EFAULT;
9297             }
9298             target_to_host_siginfo(&uinfo, p);
9299             unlock_user(p, arg4, 0);
9300             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9301         }
9302         return ret;
9303 #ifdef TARGET_NR_sigreturn
9304     case TARGET_NR_sigreturn:
9305         if (block_signals()) {
9306             return -TARGET_ERESTARTSYS;
9307         }
9308         return do_sigreturn(cpu_env);
9309 #endif
9310     case TARGET_NR_rt_sigreturn:
9311         if (block_signals()) {
9312             return -TARGET_ERESTARTSYS;
9313         }
9314         return do_rt_sigreturn(cpu_env);
9315     case TARGET_NR_sethostname:
9316         if (!(p = lock_user_string(arg1)))
9317             return -TARGET_EFAULT;
9318         ret = get_errno(sethostname(p, arg2));
9319         unlock_user(p, arg1, 0);
9320         return ret;
9321 #ifdef TARGET_NR_setrlimit
9322     case TARGET_NR_setrlimit:
9323         {
9324             int resource = target_to_host_resource(arg1);
9325             struct target_rlimit *target_rlim;
9326             struct rlimit rlim;
9327             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9328                 return -TARGET_EFAULT;
9329             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9330             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9331             unlock_user_struct(target_rlim, arg2, 0);
9332             return get_errno(setrlimit(resource, &rlim));
9333         }
9334 #endif
9335 #ifdef TARGET_NR_getrlimit
9336     case TARGET_NR_getrlimit:
9337         {
9338             int resource = target_to_host_resource(arg1);
9339             struct target_rlimit *target_rlim;
9340             struct rlimit rlim;
9341 
9342             ret = get_errno(getrlimit(resource, &rlim));
9343             if (!is_error(ret)) {
9344                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9345                     return -TARGET_EFAULT;
9346                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9347                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9348                 unlock_user_struct(target_rlim, arg2, 1);
9349             }
9350         }
9351         return ret;
9352 #endif
9353     case TARGET_NR_getrusage:
9354         {
9355             struct rusage rusage;
9356             ret = get_errno(getrusage(arg1, &rusage));
9357             if (!is_error(ret)) {
9358                 ret = host_to_target_rusage(arg2, &rusage);
9359             }
9360         }
9361         return ret;
9362     case TARGET_NR_gettimeofday:
9363         {
9364             struct timeval tv;
9365             ret = get_errno(gettimeofday(&tv, NULL));
9366             if (!is_error(ret)) {
9367                 if (copy_to_user_timeval(arg1, &tv))
9368                     return -TARGET_EFAULT;
9369             }
9370         }
9371         return ret;
9372     case TARGET_NR_settimeofday:
9373         {
9374             struct timeval tv, *ptv = NULL;
9375             struct timezone tz, *ptz = NULL;
9376 
9377             if (arg1) {
9378                 if (copy_from_user_timeval(&tv, arg1)) {
9379                     return -TARGET_EFAULT;
9380                 }
9381                 ptv = &tv;
9382             }
9383 
9384             if (arg2) {
9385                 if (copy_from_user_timezone(&tz, arg2)) {
9386                     return -TARGET_EFAULT;
9387                 }
9388                 ptz = &tz;
9389             }
9390 
9391             return get_errno(settimeofday(ptv, ptz));
9392         }
9393 #if defined(TARGET_NR_select)
9394     case TARGET_NR_select:
9395 #if defined(TARGET_WANT_NI_OLD_SELECT)
9396         /* Some architectures used to have old_select here,
9397          * but now return ENOSYS for it.
9398          */
9399         ret = -TARGET_ENOSYS;
9400 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9401         ret = do_old_select(arg1);
9402 #else
9403         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9404 #endif
9405         return ret;
9406 #endif
9407 #ifdef TARGET_NR_pselect6
9408     case TARGET_NR_pselect6:
9409         {
9410             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9411             fd_set rfds, wfds, efds;
9412             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9413             struct timespec ts, *ts_ptr;
9414 
9415             /*
9416              * The 6th arg is actually two args smashed together,
9417              * so we cannot use the C library.
9418              */
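            /*
             * Illustrative note (layout assumed from the unpacking below):
             * in target memory, arg6 points at a pair of abi_ulongs,
             *
             *     struct { abi_ulong sigset_ptr; abi_ulong sigset_size; };
             *
             * i.e. the address of the guest sigset and its size, which is
             * why the two values are fetched with lock_user()/tswapal()
             * instead of going through the libc pselect() wrapper.
             */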
9419             sigset_t set;
9420             struct {
9421                 sigset_t *set;
9422                 size_t size;
9423             } sig, *sig_ptr;
9424 
9425             abi_ulong arg_sigset, arg_sigsize, *arg7;
9426             target_sigset_t *target_sigset;
9427 
9428             n = arg1;
9429             rfd_addr = arg2;
9430             wfd_addr = arg3;
9431             efd_addr = arg4;
9432             ts_addr = arg5;
9433 
9434             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9435             if (ret) {
9436                 goto fail;
9437             }
9438             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9439             if (ret) {
9440                 goto fail;
9441             }
9442             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9443             if (ret) {
9444                 goto fail;
9445             }
9446 
9447             /*
9448              * This takes a timespec, and not a timeval, so we cannot
9449              * use the do_select() helper ...
9450              */
9451             if (ts_addr) {
9452                 if (target_to_host_timespec(&ts, ts_addr)) {
9453                     return -TARGET_EFAULT;
9454                 }
9455                 ts_ptr = &ts;
9456             } else {
9457                 ts_ptr = NULL;
9458             }
9459 
9460             /* Extract the two packed args for the sigset */
9461             if (arg6) {
9462                 sig_ptr = &sig;
9463                 sig.size = SIGSET_T_SIZE;
9464 
9465                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9466                 if (!arg7) {
9467                     return -TARGET_EFAULT;
9468                 }
9469                 arg_sigset = tswapal(arg7[0]);
9470                 arg_sigsize = tswapal(arg7[1]);
9471                 unlock_user(arg7, arg6, 0);
9472 
9473                 if (arg_sigset) {
9474                     sig.set = &set;
9475                     if (arg_sigsize != sizeof(*target_sigset)) {
9476                         /* Like the kernel, we enforce correct size sigsets */
9477                         ret = -TARGET_EINVAL;
9478                         goto fail;
9479                     }
9480                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9481                                               sizeof(*target_sigset), 1);
9482                     if (!target_sigset) {
9483                         return -TARGET_EFAULT;
9484                     }
9485                     target_to_host_sigset(&set, target_sigset);
9486                     unlock_user(target_sigset, arg_sigset, 0);
9487                 } else {
9488                     sig.set = NULL;
9489                 }
9490             } else {
9491                 sig_ptr = NULL;
9492             }
9493 
9494             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9495                                           ts_ptr, sig_ptr));
9496 
9497             if (!is_error(ret)) {
9498                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9499                     return -TARGET_EFAULT;
9500                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9501                     return -TARGET_EFAULT;
9502                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9503                     return -TARGET_EFAULT;
9504 
9505                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9506                     return -TARGET_EFAULT;
9507             }
9508         }
9509         return ret;
9510 #endif
9511 #ifdef TARGET_NR_symlink
9512     case TARGET_NR_symlink:
9513         {
9514             void *p2;
9515             p = lock_user_string(arg1);
9516             p2 = lock_user_string(arg2);
9517             if (!p || !p2)
9518                 ret = -TARGET_EFAULT;
9519             else
9520                 ret = get_errno(symlink(p, p2));
9521             unlock_user(p2, arg2, 0);
9522             unlock_user(p, arg1, 0);
9523         }
9524         return ret;
9525 #endif
9526 #if defined(TARGET_NR_symlinkat)
9527     case TARGET_NR_symlinkat:
9528         {
9529             void *p2;
9530             p  = lock_user_string(arg1);
9531             p2 = lock_user_string(arg3);
9532             if (!p || !p2)
9533                 ret = -TARGET_EFAULT;
9534             else
9535                 ret = get_errno(symlinkat(p, arg2, p2));
9536             unlock_user(p2, arg3, 0);
9537             unlock_user(p, arg1, 0);
9538         }
9539         return ret;
9540 #endif
9541 #ifdef TARGET_NR_oldlstat
9542     case TARGET_NR_oldlstat:
9543         goto unimplemented;
9544 #endif
9545 #ifdef TARGET_NR_readlink
9546     case TARGET_NR_readlink:
9547         {
9548             void *p2;
9549             p = lock_user_string(arg1);
9550             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9551             if (!p || !p2) {
9552                 ret = -TARGET_EFAULT;
9553             } else if (!arg3) {
9554                 /* Short circuit this for the magic exe check. */
9555                 ret = -TARGET_EINVAL;
9556             } else if (is_proc_myself((const char *)p, "exe")) {
9557                 char real[PATH_MAX], *temp;
9558                 temp = realpath(exec_path, real);
9559                 /* Return value is # of bytes that we wrote to the buffer. */
9560                 if (temp == NULL) {
9561                     ret = get_errno(-1);
9562                 } else {
9563                     /* Don't worry about sign mismatch as earlier mapping
9564                      * logic would have thrown a bad address error. */
9565                     ret = MIN(strlen(real), arg3);
9566                     /* We cannot NUL terminate the string. */
9567                     memcpy(p2, real, ret);
9568                 }
9569             } else {
9570                 ret = get_errno(readlink(path(p), p2, arg3));
9571             }
9572             unlock_user(p2, arg2, ret);
9573             unlock_user(p, arg1, 0);
9574         }
9575         return ret;
9576 #endif
9577 #if defined(TARGET_NR_readlinkat)
9578     case TARGET_NR_readlinkat:
9579         {
9580             void *p2;
9581             p  = lock_user_string(arg2);
9582             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9583             if (!p || !p2) {
9584                 ret = -TARGET_EFAULT;
9585             } else if (is_proc_myself((const char *)p, "exe")) {
9586                 char real[PATH_MAX], *temp;
9587                 temp = realpath(exec_path, real);
9588                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9589                 snprintf((char *)p2, arg4, "%s", real);
9590             } else {
9591                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9592             }
9593             unlock_user(p2, arg3, ret);
9594             unlock_user(p, arg2, 0);
9595         }
9596         return ret;
9597 #endif
9598 #ifdef TARGET_NR_uselib
9599     case TARGET_NR_uselib:
9600         goto unimplemented;
9601 #endif
9602 #ifdef TARGET_NR_swapon
9603     case TARGET_NR_swapon:
9604         if (!(p = lock_user_string(arg1)))
9605             return -TARGET_EFAULT;
9606         ret = get_errno(swapon(p, arg2));
9607         unlock_user(p, arg1, 0);
9608         return ret;
9609 #endif
9610     case TARGET_NR_reboot:
9611         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9612            /* arg4 (the reboot command string) is only used for RESTART2 */
9613            p = lock_user_string(arg4);
9614            if (!p) {
9615                return -TARGET_EFAULT;
9616            }
9617            ret = get_errno(reboot(arg1, arg2, arg3, p));
9618            unlock_user(p, arg4, 0);
9619         } else {
9620            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9621         }
9622         return ret;
9623 #ifdef TARGET_NR_readdir
9624     case TARGET_NR_readdir:
9625         goto unimplemented;
9626 #endif
9627 #ifdef TARGET_NR_mmap
9628     case TARGET_NR_mmap:
9629 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9630     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9631     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9632     || defined(TARGET_S390X)
9633         {
9634             abi_ulong *v;
9635             abi_ulong v1, v2, v3, v4, v5, v6;
9636             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9637                 return -TARGET_EFAULT;
9638             v1 = tswapal(v[0]);
9639             v2 = tswapal(v[1]);
9640             v3 = tswapal(v[2]);
9641             v4 = tswapal(v[3]);
9642             v5 = tswapal(v[4]);
9643             v6 = tswapal(v[5]);
9644             unlock_user(v, arg1, 0);
9645             ret = get_errno(target_mmap(v1, v2, v3,
9646                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9647                                         v5, v6));
9648         }
9649 #else
9650         ret = get_errno(target_mmap(arg1, arg2, arg3,
9651                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9652                                     arg5,
9653                                     arg6));
9654 #endif
9655         return ret;
9656 #endif
9657 #ifdef TARGET_NR_mmap2
9658     case TARGET_NR_mmap2:
9659 #ifndef MMAP_SHIFT
9660 #define MMAP_SHIFT 12
9661 #endif
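        /*
         * mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
         * (4 KiB with the default of 12), so a guest offset value of N
         * corresponds to a byte offset of N * 4096; hence the shift below.
         */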
9662         ret = target_mmap(arg1, arg2, arg3,
9663                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9664                           arg5, arg6 << MMAP_SHIFT);
9665         return get_errno(ret);
9666 #endif
9667     case TARGET_NR_munmap:
9668         return get_errno(target_munmap(arg1, arg2));
9669     case TARGET_NR_mprotect:
9670         {
9671             TaskState *ts = cpu->opaque;
9672             /* Special hack to detect libc making the stack executable.  */
9673             if ((arg3 & PROT_GROWSDOWN)
9674                 && arg1 >= ts->info->stack_limit
9675                 && arg1 <= ts->info->start_stack) {
9676                 arg3 &= ~PROT_GROWSDOWN;
9677                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9678                 arg1 = ts->info->stack_limit;
9679             }
9680         }
9681         return get_errno(target_mprotect(arg1, arg2, arg3));
9682 #ifdef TARGET_NR_mremap
9683     case TARGET_NR_mremap:
9684         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9685 #endif
9686         /* ??? msync/mlock/munlock are broken for softmmu.  */
9687 #ifdef TARGET_NR_msync
9688     case TARGET_NR_msync:
9689         return get_errno(msync(g2h(arg1), arg2, arg3));
9690 #endif
9691 #ifdef TARGET_NR_mlock
9692     case TARGET_NR_mlock:
9693         return get_errno(mlock(g2h(arg1), arg2));
9694 #endif
9695 #ifdef TARGET_NR_munlock
9696     case TARGET_NR_munlock:
9697         return get_errno(munlock(g2h(arg1), arg2));
9698 #endif
9699 #ifdef TARGET_NR_mlockall
9700     case TARGET_NR_mlockall:
9701         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9702 #endif
9703 #ifdef TARGET_NR_munlockall
9704     case TARGET_NR_munlockall:
9705         return get_errno(munlockall());
9706 #endif
9707 #ifdef TARGET_NR_truncate
9708     case TARGET_NR_truncate:
9709         if (!(p = lock_user_string(arg1)))
9710             return -TARGET_EFAULT;
9711         ret = get_errno(truncate(p, arg2));
9712         unlock_user(p, arg1, 0);
9713         return ret;
9714 #endif
9715 #ifdef TARGET_NR_ftruncate
9716     case TARGET_NR_ftruncate:
9717         return get_errno(ftruncate(arg1, arg2));
9718 #endif
9719     case TARGET_NR_fchmod:
9720         return get_errno(fchmod(arg1, arg2));
9721 #if defined(TARGET_NR_fchmodat)
9722     case TARGET_NR_fchmodat:
9723         if (!(p = lock_user_string(arg2)))
9724             return -TARGET_EFAULT;
9725         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9726         unlock_user(p, arg2, 0);
9727         return ret;
9728 #endif
9729     case TARGET_NR_getpriority:
9730         /* Note that negative values are valid for getpriority, so we must
9731            differentiate based on errno settings.  */
9732         errno = 0;
9733         ret = getpriority(arg1, arg2);
9734         if (ret == -1 && errno != 0) {
9735             return -host_to_target_errno(errno);
9736         }
9737 #ifdef TARGET_ALPHA
9738         /* Return value is the unbiased priority.  Signal no error.  */
9739         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9740 #else
9741         /* Return value is a biased priority to avoid negative numbers.  */
9742         ret = 20 - ret;
9743 #endif
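        /*
         * Worked example of the non-Alpha biasing above: a host nice value
         * of 19 becomes 1, 0 becomes 20 and -20 becomes 40, which is what
         * the kernel's sys_getpriority() is expected to return to the guest.
         */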
9744         return ret;
9745     case TARGET_NR_setpriority:
9746         return get_errno(setpriority(arg1, arg2, arg3));
9747 #ifdef TARGET_NR_profil
9748     case TARGET_NR_profil:
9749         goto unimplemented;
9750 #endif
9751 #ifdef TARGET_NR_statfs
9752     case TARGET_NR_statfs:
9753         if (!(p = lock_user_string(arg1))) {
9754             return -TARGET_EFAULT;
9755         }
9756         ret = get_errno(statfs(path(p), &stfs));
9757         unlock_user(p, arg1, 0);
9758     convert_statfs:
9759         if (!is_error(ret)) {
9760             struct target_statfs *target_stfs;
9761 
9762             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9763                 return -TARGET_EFAULT;
9764             __put_user(stfs.f_type, &target_stfs->f_type);
9765             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9766             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9767             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9768             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9769             __put_user(stfs.f_files, &target_stfs->f_files);
9770             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9771             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9772             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9773             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9774             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9775 #ifdef _STATFS_F_FLAGS
9776             __put_user(stfs.f_flags, &target_stfs->f_flags);
9777 #else
9778             __put_user(0, &target_stfs->f_flags);
9779 #endif
9780             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9781             unlock_user_struct(target_stfs, arg2, 1);
9782         }
9783         return ret;
9784 #endif
9785 #ifdef TARGET_NR_fstatfs
9786     case TARGET_NR_fstatfs:
9787         ret = get_errno(fstatfs(arg1, &stfs));
9788         goto convert_statfs;
9789 #endif
9790 #ifdef TARGET_NR_statfs64
9791     case TARGET_NR_statfs64:
9792         if (!(p = lock_user_string(arg1))) {
9793             return -TARGET_EFAULT;
9794         }
9795         ret = get_errno(statfs(path(p), &stfs));
9796         unlock_user(p, arg1, 0);
9797     convert_statfs64:
9798         if (!is_error(ret)) {
9799             struct target_statfs64 *target_stfs;
9800 
9801             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9802                 return -TARGET_EFAULT;
9803             __put_user(stfs.f_type, &target_stfs->f_type);
9804             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9805             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9806             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9807             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9808             __put_user(stfs.f_files, &target_stfs->f_files);
9809             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9810             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9811             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9812             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9813             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9814             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9815             unlock_user_struct(target_stfs, arg3, 1);
9816         }
9817         return ret;
9818     case TARGET_NR_fstatfs64:
9819         ret = get_errno(fstatfs(arg1, &stfs));
9820         goto convert_statfs64;
9821 #endif
9822 #ifdef TARGET_NR_ioperm
9823     case TARGET_NR_ioperm:
9824         goto unimplemented;
9825 #endif
9826 #ifdef TARGET_NR_socketcall
9827     case TARGET_NR_socketcall:
9828         return do_socketcall(arg1, arg2);
9829 #endif
9830 #ifdef TARGET_NR_accept
9831     case TARGET_NR_accept:
9832         return do_accept4(arg1, arg2, arg3, 0);
9833 #endif
9834 #ifdef TARGET_NR_accept4
9835     case TARGET_NR_accept4:
9836         return do_accept4(arg1, arg2, arg3, arg4);
9837 #endif
9838 #ifdef TARGET_NR_bind
9839     case TARGET_NR_bind:
9840         return do_bind(arg1, arg2, arg3);
9841 #endif
9842 #ifdef TARGET_NR_connect
9843     case TARGET_NR_connect:
9844         return do_connect(arg1, arg2, arg3);
9845 #endif
9846 #ifdef TARGET_NR_getpeername
9847     case TARGET_NR_getpeername:
9848         return do_getpeername(arg1, arg2, arg3);
9849 #endif
9850 #ifdef TARGET_NR_getsockname
9851     case TARGET_NR_getsockname:
9852         return do_getsockname(arg1, arg2, arg3);
9853 #endif
9854 #ifdef TARGET_NR_getsockopt
9855     case TARGET_NR_getsockopt:
9856         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9857 #endif
9858 #ifdef TARGET_NR_listen
9859     case TARGET_NR_listen:
9860         return get_errno(listen(arg1, arg2));
9861 #endif
9862 #ifdef TARGET_NR_recv
9863     case TARGET_NR_recv:
9864         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9865 #endif
9866 #ifdef TARGET_NR_recvfrom
9867     case TARGET_NR_recvfrom:
9868         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9869 #endif
9870 #ifdef TARGET_NR_recvmsg
9871     case TARGET_NR_recvmsg:
9872         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9873 #endif
9874 #ifdef TARGET_NR_send
9875     case TARGET_NR_send:
9876         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9877 #endif
9878 #ifdef TARGET_NR_sendmsg
9879     case TARGET_NR_sendmsg:
9880         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9881 #endif
9882 #ifdef TARGET_NR_sendmmsg
9883     case TARGET_NR_sendmmsg:
9884         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9885     case TARGET_NR_recvmmsg:
9886         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9887 #endif
9888 #ifdef TARGET_NR_sendto
9889     case TARGET_NR_sendto:
9890         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9891 #endif
9892 #ifdef TARGET_NR_shutdown
9893     case TARGET_NR_shutdown:
9894         return get_errno(shutdown(arg1, arg2));
9895 #endif
9896 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9897     case TARGET_NR_getrandom:
9898         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9899         if (!p) {
9900             return -TARGET_EFAULT;
9901         }
9902         ret = get_errno(getrandom(p, arg2, arg3));
9903         unlock_user(p, arg1, ret);
9904         return ret;
9905 #endif
9906 #ifdef TARGET_NR_socket
9907     case TARGET_NR_socket:
9908         return do_socket(arg1, arg2, arg3);
9909 #endif
9910 #ifdef TARGET_NR_socketpair
9911     case TARGET_NR_socketpair:
9912         return do_socketpair(arg1, arg2, arg3, arg4);
9913 #endif
9914 #ifdef TARGET_NR_setsockopt
9915     case TARGET_NR_setsockopt:
9916         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9917 #endif
9918 #if defined(TARGET_NR_syslog)
9919     case TARGET_NR_syslog:
9920         {
9921             int len = arg3;
9922 
9923             switch (arg1) {
9924             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9925             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9926             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9927             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9928             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9929             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9930             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9931             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9932                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9933             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9934             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9935             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9936                 {
9937                     ret = -TARGET_EINVAL;
9938                     if (len < 0) {
9939                         goto fail;
9940                     }
9941                     if (len == 0) {
9942                         return 0;
9943                     }
9944                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9945                     if (!p) {
9946                         ret = -TARGET_EFAULT;
9947                         goto fail;
9948                     }
9949                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9950                     unlock_user(p, arg2, arg3);
9951                 }
9952                 return ret;
9953             default:
9954                 return -TARGET_EINVAL;
9955             }
9956         }
9957         break;
9958 #endif
9959     case TARGET_NR_setitimer:
9960         {
9961             struct itimerval value, ovalue, *pvalue;
9962 
9963             if (arg2) {
9964                 pvalue = &value;
9965                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9966                     || copy_from_user_timeval(&pvalue->it_value,
9967                                               arg2 + sizeof(struct target_timeval)))
9968                     return -TARGET_EFAULT;
9969             } else {
9970                 pvalue = NULL;
9971             }
9972             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9973             if (!is_error(ret) && arg3) {
9974                 if (copy_to_user_timeval(arg3,
9975                                          &ovalue.it_interval)
9976                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9977                                             &ovalue.it_value))
9978                     return -TARGET_EFAULT;
9979             }
9980         }
9981         return ret;
9982     case TARGET_NR_getitimer:
9983         {
9984             struct itimerval value;
9985 
9986             ret = get_errno(getitimer(arg1, &value));
9987             if (!is_error(ret) && arg2) {
9988                 if (copy_to_user_timeval(arg2,
9989                                          &value.it_interval)
9990                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9991                                             &value.it_value))
9992                     return -TARGET_EFAULT;
9993             }
9994         }
9995         return ret;
9996 #ifdef TARGET_NR_stat
9997     case TARGET_NR_stat:
9998         if (!(p = lock_user_string(arg1))) {
9999             return -TARGET_EFAULT;
10000         }
10001         ret = get_errno(stat(path(p), &st));
10002         unlock_user(p, arg1, 0);
10003         goto do_stat;
10004 #endif
10005 #ifdef TARGET_NR_lstat
10006     case TARGET_NR_lstat:
10007         if (!(p = lock_user_string(arg1))) {
10008             return -TARGET_EFAULT;
10009         }
10010         ret = get_errno(lstat(path(p), &st));
10011         unlock_user(p, arg1, 0);
10012         goto do_stat;
10013 #endif
10014 #ifdef TARGET_NR_fstat
10015     case TARGET_NR_fstat:
10016         {
10017             ret = get_errno(fstat(arg1, &st));
10018 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10019         do_stat:
10020 #endif
10021             if (!is_error(ret)) {
10022                 struct target_stat *target_st;
10023 
10024                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10025                     return -TARGET_EFAULT;
10026                 memset(target_st, 0, sizeof(*target_st));
10027                 __put_user(st.st_dev, &target_st->st_dev);
10028                 __put_user(st.st_ino, &target_st->st_ino);
10029                 __put_user(st.st_mode, &target_st->st_mode);
10030                 __put_user(st.st_uid, &target_st->st_uid);
10031                 __put_user(st.st_gid, &target_st->st_gid);
10032                 __put_user(st.st_nlink, &target_st->st_nlink);
10033                 __put_user(st.st_rdev, &target_st->st_rdev);
10034                 __put_user(st.st_size, &target_st->st_size);
10035                 __put_user(st.st_blksize, &target_st->st_blksize);
10036                 __put_user(st.st_blocks, &target_st->st_blocks);
10037                 __put_user(st.st_atime, &target_st->target_st_atime);
10038                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10039                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10040                 unlock_user_struct(target_st, arg2, 1);
10041             }
10042         }
10043         return ret;
10044 #endif
10045 #ifdef TARGET_NR_olduname
10046     case TARGET_NR_olduname:
10047         goto unimplemented;
10048 #endif
10049 #ifdef TARGET_NR_iopl
10050     case TARGET_NR_iopl:
10051         goto unimplemented;
10052 #endif
10053     case TARGET_NR_vhangup:
10054         return get_errno(vhangup());
10055 #ifdef TARGET_NR_idle
10056     case TARGET_NR_idle:
10057         goto unimplemented;
10058 #endif
10059 #ifdef TARGET_NR_syscall
10060     case TARGET_NR_syscall:
10061         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10062                           arg6, arg7, arg8, 0);
10063 #endif
10064     case TARGET_NR_wait4:
10065         {
10066             int status;
10067             abi_long status_ptr = arg2;
10068             struct rusage rusage, *rusage_ptr;
10069             abi_ulong target_rusage = arg4;
10070             abi_long rusage_err;
10071             if (target_rusage)
10072                 rusage_ptr = &rusage;
10073             else
10074                 rusage_ptr = NULL;
10075             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10076             if (!is_error(ret)) {
10077                 if (status_ptr && ret) {
10078                     status = host_to_target_waitstatus(status);
10079                     if (put_user_s32(status, status_ptr))
10080                         return -TARGET_EFAULT;
10081                 }
10082                 if (target_rusage) {
10083                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10084                     if (rusage_err) {
10085                         ret = rusage_err;
10086                     }
10087                 }
10088             }
10089         }
10090         return ret;
10091 #ifdef TARGET_NR_swapoff
10092     case TARGET_NR_swapoff:
10093         if (!(p = lock_user_string(arg1)))
10094             return -TARGET_EFAULT;
10095         ret = get_errno(swapoff(p));
10096         unlock_user(p, arg1, 0);
10097         return ret;
10098 #endif
10099     case TARGET_NR_sysinfo:
10100         {
10101             struct target_sysinfo *target_value;
10102             struct sysinfo value;
10103             ret = get_errno(sysinfo(&value));
10104             if (!is_error(ret) && arg1)
10105             {
10106                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10107                     return -TARGET_EFAULT;
10108                 __put_user(value.uptime, &target_value->uptime);
10109                 __put_user(value.loads[0], &target_value->loads[0]);
10110                 __put_user(value.loads[1], &target_value->loads[1]);
10111                 __put_user(value.loads[2], &target_value->loads[2]);
10112                 __put_user(value.totalram, &target_value->totalram);
10113                 __put_user(value.freeram, &target_value->freeram);
10114                 __put_user(value.sharedram, &target_value->sharedram);
10115                 __put_user(value.bufferram, &target_value->bufferram);
10116                 __put_user(value.totalswap, &target_value->totalswap);
10117                 __put_user(value.freeswap, &target_value->freeswap);
10118                 __put_user(value.procs, &target_value->procs);
10119                 __put_user(value.totalhigh, &target_value->totalhigh);
10120                 __put_user(value.freehigh, &target_value->freehigh);
10121                 __put_user(value.mem_unit, &target_value->mem_unit);
10122                 unlock_user_struct(target_value, arg1, 1);
10123             }
10124         }
10125         return ret;
10126 #ifdef TARGET_NR_ipc
10127     case TARGET_NR_ipc:
10128         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10129 #endif
10130 #ifdef TARGET_NR_semget
10131     case TARGET_NR_semget:
10132         return get_errno(semget(arg1, arg2, arg3));
10133 #endif
10134 #ifdef TARGET_NR_semop
10135     case TARGET_NR_semop:
10136         return do_semop(arg1, arg2, arg3);
10137 #endif
10138 #ifdef TARGET_NR_semctl
10139     case TARGET_NR_semctl:
10140         return do_semctl(arg1, arg2, arg3, arg4);
10141 #endif
10142 #ifdef TARGET_NR_msgctl
10143     case TARGET_NR_msgctl:
10144         return do_msgctl(arg1, arg2, arg3);
10145 #endif
10146 #ifdef TARGET_NR_msgget
10147     case TARGET_NR_msgget:
10148         return get_errno(msgget(arg1, arg2));
10149 #endif
10150 #ifdef TARGET_NR_msgrcv
10151     case TARGET_NR_msgrcv:
10152         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10153 #endif
10154 #ifdef TARGET_NR_msgsnd
10155     case TARGET_NR_msgsnd:
10156         return do_msgsnd(arg1, arg2, arg3, arg4);
10157 #endif
10158 #ifdef TARGET_NR_shmget
10159     case TARGET_NR_shmget:
10160         return get_errno(shmget(arg1, arg2, arg3));
10161 #endif
10162 #ifdef TARGET_NR_shmctl
10163     case TARGET_NR_shmctl:
10164         return do_shmctl(arg1, arg2, arg3);
10165 #endif
10166 #ifdef TARGET_NR_shmat
10167     case TARGET_NR_shmat:
10168         return do_shmat(cpu_env, arg1, arg2, arg3);
10169 #endif
10170 #ifdef TARGET_NR_shmdt
10171     case TARGET_NR_shmdt:
10172         return do_shmdt(arg1);
10173 #endif
10174     case TARGET_NR_fsync:
10175         return get_errno(fsync(arg1));
10176     case TARGET_NR_clone:
10177         /* Linux manages to have three different orderings for its
10178          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10179          * match the kernel's CONFIG_CLONE_* settings.
10180          * Microblaze is further special in that it uses a sixth
10181          * implicit argument to clone for the TLS pointer.
10182          */
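        /*
         * For reference, assuming the usual kernel conventions, the guest
         * argument orders handled below are:
         *   default:          flags, newsp, parent_tidptr, child_tidptr, tls
         *   CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   CLONE_BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
         */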
10183 #if defined(TARGET_MICROBLAZE)
10184         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10185 #elif defined(TARGET_CLONE_BACKWARDS)
10186         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10187 #elif defined(TARGET_CLONE_BACKWARDS2)
10188         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10189 #else
10190         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10191 #endif
10192         return ret;
10193 #ifdef __NR_exit_group
10194         /* new thread calls */
10195     case TARGET_NR_exit_group:
10196         preexit_cleanup(cpu_env, arg1);
10197         return get_errno(exit_group(arg1));
10198 #endif
10199     case TARGET_NR_setdomainname:
10200         if (!(p = lock_user_string(arg1)))
10201             return -TARGET_EFAULT;
10202         ret = get_errno(setdomainname(p, arg2));
10203         unlock_user(p, arg1, 0);
10204         return ret;
10205     case TARGET_NR_uname:
10206         /* No need to transcode because we use the Linux syscall. */
10207         {
10208             struct new_utsname * buf;
10209 
10210             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10211                 return -TARGET_EFAULT;
10212             ret = get_errno(sys_uname(buf));
10213             if (!is_error(ret)) {
10214                 /* Overwrite the native machine name with whatever is being
10215                    emulated. */
10216                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10217                           sizeof(buf->machine));
10218                 /* Allow the user to override the reported release.  */
10219                 if (qemu_uname_release && *qemu_uname_release) {
10220                     g_strlcpy(buf->release, qemu_uname_release,
10221                               sizeof(buf->release));
10222                 }
10223             }
10224             unlock_user_struct(buf, arg1, 1);
10225         }
10226         return ret;
10227 #ifdef TARGET_I386
10228     case TARGET_NR_modify_ldt:
10229         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10230 #if !defined(TARGET_X86_64)
10231     case TARGET_NR_vm86old:
10232         goto unimplemented;
10233     case TARGET_NR_vm86:
10234         return do_vm86(cpu_env, arg1, arg2);
10235 #endif
10236 #endif
10237     case TARGET_NR_adjtimex:
10238         {
10239             struct timex host_buf;
10240 
10241             if (target_to_host_timex(&host_buf, arg1) != 0) {
10242                 return -TARGET_EFAULT;
10243             }
10244             ret = get_errno(adjtimex(&host_buf));
10245             if (!is_error(ret)) {
10246                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10247                     return -TARGET_EFAULT;
10248                 }
10249             }
10250         }
10251         return ret;
10252 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10253     case TARGET_NR_clock_adjtime:
10254         {
10255             struct timex htx, *phtx = &htx;
10256 
10257             if (target_to_host_timex(phtx, arg2) != 0) {
10258                 return -TARGET_EFAULT;
10259             }
10260             ret = get_errno(clock_adjtime(arg1, phtx));
10261             if (!is_error(ret) && phtx) {
10262                 if (host_to_target_timex(arg2, phtx) != 0) {
10263                     return -TARGET_EFAULT;
10264                 }
10265             }
10266         }
10267         return ret;
10268 #endif
10269 #ifdef TARGET_NR_create_module
10270     case TARGET_NR_create_module:
10271 #endif
10272     case TARGET_NR_init_module:
10273     case TARGET_NR_delete_module:
10274 #ifdef TARGET_NR_get_kernel_syms
10275     case TARGET_NR_get_kernel_syms:
10276 #endif
10277         goto unimplemented;
10278     case TARGET_NR_quotactl:
10279         goto unimplemented;
10280     case TARGET_NR_getpgid:
10281         return get_errno(getpgid(arg1));
10282     case TARGET_NR_fchdir:
10283         return get_errno(fchdir(arg1));
10284 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10285     case TARGET_NR_bdflush:
10286         goto unimplemented;
10287 #endif
10288 #ifdef TARGET_NR_sysfs
10289     case TARGET_NR_sysfs:
10290         goto unimplemented;
10291 #endif
10292     case TARGET_NR_personality:
10293         return get_errno(personality(arg1));
10294 #ifdef TARGET_NR_afs_syscall
10295     case TARGET_NR_afs_syscall:
10296         goto unimplemented;
10297 #endif
10298 #ifdef TARGET_NR__llseek /* Not on alpha */
10299     case TARGET_NR__llseek:
10300         {
10301             int64_t res;
10302 #if !defined(__NR_llseek)
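            /*
             * As used below: arg2 and arg3 carry the high and low 32-bit
             * halves of the 64-bit offset, arg4 is the guest address where
             * the resulting offset is stored, and arg5 is the whence value.
             */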
10303             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10304             if (res == -1) {
10305                 ret = get_errno(res);
10306             } else {
10307                 ret = 0;
10308             }
10309 #else
10310             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10311 #endif
10312             if ((ret == 0) && put_user_s64(res, arg4)) {
10313                 return -TARGET_EFAULT;
10314             }
10315         }
10316         return ret;
10317 #endif
10318 #ifdef TARGET_NR_getdents
10319     case TARGET_NR_getdents:
10320 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10321 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10322         {
10323             struct target_dirent *target_dirp;
10324             struct linux_dirent *dirp;
10325             abi_long count = arg3;
10326 
10327             dirp = g_try_malloc(count);
10328             if (!dirp) {
10329                 ret = -TARGET_ENOMEM;
10330                 goto fail;
10331             }
10332 
10333             ret = get_errno(sys_getdents(arg1, dirp, count));
10334             if (!is_error(ret)) {
10335                 struct linux_dirent *de;
10336 		struct target_dirent *tde;
10337                 struct target_dirent *tde;
10338                 int reclen, treclen;
10339 		int count1, tnamelen;
10340                 int count1, tnamelen;
10341 
10342                 count1 = 0;
10343                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10344                     return -TARGET_EFAULT;
10345 		tde = target_dirp;
10346                 tde = target_dirp;
10347                     reclen = de->d_reclen;
10348                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10349                     assert(tnamelen >= 0);
10350                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10351                     assert(count1 + treclen <= count);
10352                     tde->d_reclen = tswap16(treclen);
10353                     tde->d_ino = tswapal(de->d_ino);
10354                     tde->d_off = tswapal(de->d_off);
10355                     memcpy(tde->d_name, de->d_name, tnamelen);
10356                     de = (struct linux_dirent *)((char *)de + reclen);
10357                     len -= reclen;
10358                     tde = (struct target_dirent *)((char *)tde + treclen);
10359 		    count1 += treclen;
10360                     count1 += treclen;
10361 		ret = count1;
10362                 ret = count1;
10363             }
10364             g_free(dirp);
10365         }
10366 #else
10367         {
10368             struct linux_dirent *dirp;
10369             abi_long count = arg3;
10370 
10371             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10372                 return -TARGET_EFAULT;
10373             ret = get_errno(sys_getdents(arg1, dirp, count));
10374             if (!is_error(ret)) {
10375                 struct linux_dirent *de;
10376                 int len = ret;
10377                 int reclen;
10378                 de = dirp;
10379                 while (len > 0) {
10380                     reclen = de->d_reclen;
10381                     if (reclen > len)
10382                         break;
10383                     de->d_reclen = tswap16(reclen);
10384                     tswapls(&de->d_ino);
10385                     tswapls(&de->d_off);
10386                     de = (struct linux_dirent *)((char *)de + reclen);
10387                     len -= reclen;
10388                 }
10389             }
10390             unlock_user(dirp, arg2, ret);
10391         }
10392 #endif
10393 #else
10394         /* Implement getdents in terms of getdents64 */
10395         {
10396             struct linux_dirent64 *dirp;
10397             abi_long count = arg3;
10398 
10399             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10400             if (!dirp) {
10401                 return -TARGET_EFAULT;
10402             }
10403             ret = get_errno(sys_getdents64(arg1, dirp, count));
10404             if (!is_error(ret)) {
10405                 /* Convert the dirent64 structs to target dirent.  We do this
10406                  * in-place, since we can guarantee that a target_dirent is no
10407                  * larger than a dirent64; however this means we have to be
10408                  * careful to read everything before writing in the new format.
10409                  */
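                /*
                 * A sketch of why the ordering matters: tde and de start at
                 * the same address, so the converted record being written can
                 * overlap the source record still being read.  That is why
                 * d_ino, d_off, d_type and the name length are read into
                 * locals before anything is written, and the name itself is
                 * moved with memmove().
                 */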
10410                 struct linux_dirent64 *de;
10411                 struct target_dirent *tde;
10412                 int len = ret;
10413                 int tlen = 0;
10414 
10415                 de = dirp;
10416                 tde = (struct target_dirent *)dirp;
10417                 while (len > 0) {
10418                     int namelen, treclen;
10419                     int reclen = de->d_reclen;
10420                     uint64_t ino = de->d_ino;
10421                     int64_t off = de->d_off;
10422                     uint8_t type = de->d_type;
10423 
10424                     namelen = strlen(de->d_name);
10425                     treclen = offsetof(struct target_dirent, d_name)
10426                         + namelen + 2;
10427                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10428 
10429                     memmove(tde->d_name, de->d_name, namelen + 1);
10430                     tde->d_ino = tswapal(ino);
10431                     tde->d_off = tswapal(off);
10432                     tde->d_reclen = tswap16(treclen);
10433                     /* The target_dirent type is in what was formerly a padding
10434                      * byte at the end of the structure:
10435                      */
10436                     *(((char *)tde) + treclen - 1) = type;
10437 
10438                     de = (struct linux_dirent64 *)((char *)de + reclen);
10439                     tde = (struct target_dirent *)((char *)tde + treclen);
10440                     len -= reclen;
10441                     tlen += treclen;
10442                 }
10443                 ret = tlen;
10444             }
10445             unlock_user(dirp, arg2, ret);
10446         }
10447 #endif
10448         return ret;
10449 #endif /* TARGET_NR_getdents */
10450 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10451     case TARGET_NR_getdents64:
10452         {
10453             struct linux_dirent64 *dirp;
10454             abi_long count = arg3;
10455             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10456                 return -TARGET_EFAULT;
10457             ret = get_errno(sys_getdents64(arg1, dirp, count));
10458             if (!is_error(ret)) {
10459                 struct linux_dirent64 *de;
10460                 int len = ret;
10461                 int reclen;
10462                 de = dirp;
10463                 while (len > 0) {
10464                     reclen = de->d_reclen;
10465                     if (reclen > len)
10466                         break;
10467                     de->d_reclen = tswap16(reclen);
10468                     tswap64s((uint64_t *)&de->d_ino);
10469                     tswap64s((uint64_t *)&de->d_off);
10470                     de = (struct linux_dirent64 *)((char *)de + reclen);
10471                     len -= reclen;
10472                 }
10473             }
10474             unlock_user(dirp, arg2, ret);
10475         }
10476         return ret;
10477 #endif /* TARGET_NR_getdents64 */
10478 #if defined(TARGET_NR__newselect)
10479     case TARGET_NR__newselect:
10480         return do_select(arg1, arg2, arg3, arg4, arg5);
10481 #endif
10482 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10483 # ifdef TARGET_NR_poll
10484     case TARGET_NR_poll:
10485 # endif
10486 # ifdef TARGET_NR_ppoll
10487     case TARGET_NR_ppoll:
10488 # endif
10489         {
10490             struct target_pollfd *target_pfd;
10491             unsigned int nfds = arg2;
10492             struct pollfd *pfd;
10493             unsigned int i;
10494 
10495             pfd = NULL;
10496             target_pfd = NULL;
10497             if (nfds) {
10498                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10499                     return -TARGET_EINVAL;
10500                 }
10501 
10502                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10503                                        sizeof(struct target_pollfd) * nfds, 1);
10504                 if (!target_pfd) {
10505                     return -TARGET_EFAULT;
10506                 }
10507 
10508                 pfd = alloca(sizeof(struct pollfd) * nfds);
10509                 for (i = 0; i < nfds; i++) {
10510                     pfd[i].fd = tswap32(target_pfd[i].fd);
10511                     pfd[i].events = tswap16(target_pfd[i].events);
10512                 }
10513             }
10514 
10515             switch (num) {
10516 # ifdef TARGET_NR_ppoll
10517             case TARGET_NR_ppoll:
10518             {
10519                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10520                 target_sigset_t *target_set;
10521                 sigset_t _set, *set = &_set;
10522 
10523                 if (arg3) {
10524                     if (target_to_host_timespec(timeout_ts, arg3)) {
10525                         unlock_user(target_pfd, arg1, 0);
10526                         return -TARGET_EFAULT;
10527                     }
10528                 } else {
10529                     timeout_ts = NULL;
10530                 }
10531 
10532                 if (arg4) {
10533                     if (arg5 != sizeof(target_sigset_t)) {
10534                         unlock_user(target_pfd, arg1, 0);
10535                         return -TARGET_EINVAL;
10536                     }
10537 
10538                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10539                     if (!target_set) {
10540                         unlock_user(target_pfd, arg1, 0);
10541                         return -TARGET_EFAULT;
10542                     }
10543                     target_to_host_sigset(set, target_set);
10544                 } else {
10545                     set = NULL;
10546                 }
10547 
10548                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10549                                            set, SIGSET_T_SIZE));
10550 
10551                 if (!is_error(ret) && arg3) {
10552                     host_to_target_timespec(arg3, timeout_ts);
10553                 }
10554                 if (arg4) {
10555                     unlock_user(target_set, arg4, 0);
10556                 }
10557                 break;
10558             }
10559 # endif
10560 # ifdef TARGET_NR_poll
10561             case TARGET_NR_poll:
10562             {
10563                 struct timespec ts, *pts;
10564 
10565                 if (arg3 >= 0) {
10566                     /* Convert ms to secs, ns */
10567                     ts.tv_sec = arg3 / 1000;
10568                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10569                     pts = &ts;
10570                 } else {
10571                     /* A negative poll() timeout means "infinite" */
10572                     pts = NULL;
10573                 }
10574                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10575                 break;
10576             }
10577 # endif
10578             default:
10579                 g_assert_not_reached();
10580             }
10581 
10582             if (!is_error(ret)) {
10583                 for(i = 0; i < nfds; i++) {
10584                     target_pfd[i].revents = tswap16(pfd[i].revents);
10585                 }
10586             }
10587             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10588         }
10589         return ret;
10590 #endif
10591     case TARGET_NR_flock:
10592         /* NOTE: the flock constant seems to be the same for every
10593            Linux platform */
10594         return get_errno(safe_flock(arg1, arg2));
10595     case TARGET_NR_readv:
10596         {
10597             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10598             if (vec != NULL) {
10599                 ret = get_errno(safe_readv(arg1, vec, arg3));
10600                 unlock_iovec(vec, arg2, arg3, 1);
10601             } else {
10602                 ret = -host_to_target_errno(errno);
10603             }
10604         }
10605         return ret;
10606     case TARGET_NR_writev:
10607         {
10608             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10609             if (vec != NULL) {
10610                 ret = get_errno(safe_writev(arg1, vec, arg3));
10611                 unlock_iovec(vec, arg2, arg3, 0);
10612             } else {
10613                 ret = -host_to_target_errno(errno);
10614             }
10615         }
10616         return ret;
10617 #if defined(TARGET_NR_preadv)
10618     case TARGET_NR_preadv:
10619         {
10620             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10621             if (vec != NULL) {
10622                 unsigned long low, high;
10623 
10624                 target_to_host_low_high(arg4, arg5, &low, &high);
10625                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10626                 unlock_iovec(vec, arg2, arg3, 1);
10627             } else {
10628                 ret = -host_to_target_errno(errno);
10629             }
10630         }
10631         return ret;
10632 #endif
10633 #if defined(TARGET_NR_pwritev)
10634     case TARGET_NR_pwritev:
10635         {
10636             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10637             if (vec != NULL) {
10638                 unsigned long low, high;
10639 
10640                 target_to_host_low_high(arg4, arg5, &low, &high);
10641                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10642                 unlock_iovec(vec, arg2, arg3, 0);
10643             } else {
10644                 ret = -host_to_target_errno(errno);
10645             }
10646         }
10647         return ret;
10648 #endif
10649     case TARGET_NR_getsid:
10650         return get_errno(getsid(arg1));
10651 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10652     case TARGET_NR_fdatasync:
10653         return get_errno(fdatasync(arg1));
10654 #endif
10655 #ifdef TARGET_NR__sysctl
10656     case TARGET_NR__sysctl:
10657         /* We don't implement this, but ENOTDIR is always a safe
10658            return value. */
10659         return -TARGET_ENOTDIR;
10660 #endif
10661     case TARGET_NR_sched_getaffinity:
10662         {
10663             unsigned int mask_size;
10664             unsigned long *mask;
10665 
10666             /*
10667              * sched_getaffinity needs multiples of ulong, so we need to take
10668              * care of mismatches between the target and host ulong sizes.
10669              */
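            /*
             * Example of the rounding below (assuming a 32-bit guest on a
             * 64-bit host): the guest must pass a length that is a multiple
             * of 4 (sizeof(abi_ulong)), and it is rounded up to a multiple
             * of 8 (sizeof(unsigned long)) before calling the host syscall.
             */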
10670             if (arg2 & (sizeof(abi_ulong) - 1)) {
10671                 return -TARGET_EINVAL;
10672             }
10673             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10674 
10675             mask = alloca(mask_size);
10676             memset(mask, 0, mask_size);
10677             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10678 
10679             if (!is_error(ret)) {
10680                 if (ret > arg2) {
10681                     /* More data was returned than will fit in the caller's buffer.
10682                      * This only happens if sizeof(abi_long) < sizeof(long)
10683                      * and the caller passed us a buffer holding an odd number
10684                      * of abi_longs. If the host kernel is actually using the
10685                      * extra 4 bytes then fail EINVAL; otherwise we can just
10686                      * ignore them and only copy the interesting part.
10687                      */
10688                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10689                     if (numcpus > arg2 * 8) {
10690                         return -TARGET_EINVAL;
10691                     }
10692                     ret = arg2;
10693                 }
10694 
10695                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10696                     return -TARGET_EFAULT;
10697                 }
10698             }
10699         }
10700         return ret;
10701     case TARGET_NR_sched_setaffinity:
10702         {
10703             unsigned int mask_size;
10704             unsigned long *mask;
10705 
10706             /*
10707              * sched_setaffinity needs multiples of ulong, so we need to take
10708              * care of mismatches between the target and host ulong sizes.
10709              */
10710             if (arg2 & (sizeof(abi_ulong) - 1)) {
10711                 return -TARGET_EINVAL;
10712             }
10713             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10714             mask = alloca(mask_size);
10715 
10716             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10717             if (ret) {
10718                 return ret;
10719             }
10720 
10721             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10722         }
10723     case TARGET_NR_getcpu:
10724         {
10725             unsigned cpu, node;
10726             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10727                                        arg2 ? &node : NULL,
10728                                        NULL));
10729             if (is_error(ret)) {
10730                 goto fail;
10731             }
10732             if (arg1 && put_user_u32(cpu, arg1)) {
10733                 return -TARGET_EFAULT;
10734             }
10735             if (arg2 && put_user_u32(node, arg2)) {
10736                 return -TARGET_EFAULT;
10737             }
10738         }
10739         return ret;
10740     case TARGET_NR_sched_setparam:
10741         {
10742             struct sched_param *target_schp;
10743             struct sched_param schp;
10744 
10745             if (arg2 == 0) {
10746                 return -TARGET_EINVAL;
10747             }
10748             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10749                 return -TARGET_EFAULT;
10750             schp.sched_priority = tswap32(target_schp->sched_priority);
10751             unlock_user_struct(target_schp, arg2, 0);
10752             return get_errno(sched_setparam(arg1, &schp));
10753         }
10754     case TARGET_NR_sched_getparam:
10755         {
10756             struct sched_param *target_schp;
10757             struct sched_param schp;
10758 
10759             if (arg2 == 0) {
10760                 return -TARGET_EINVAL;
10761             }
10762             ret = get_errno(sched_getparam(arg1, &schp));
10763             if (!is_error(ret)) {
10764                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10765                     return -TARGET_EFAULT;
10766                 target_schp->sched_priority = tswap32(schp.sched_priority);
10767                 unlock_user_struct(target_schp, arg2, 1);
10768             }
10769         }
10770         return ret;
10771     case TARGET_NR_sched_setscheduler:
10772         {
10773             struct sched_param *target_schp;
10774             struct sched_param schp;
10775             if (arg3 == 0) {
10776                 return -TARGET_EINVAL;
10777             }
10778             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10779                 return -TARGET_EFAULT;
10780             schp.sched_priority = tswap32(target_schp->sched_priority);
10781             unlock_user_struct(target_schp, arg3, 0);
10782             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10783         }
10784     case TARGET_NR_sched_getscheduler:
10785         return get_errno(sched_getscheduler(arg1));
10786     case TARGET_NR_sched_yield:
10787         return get_errno(sched_yield());
10788     case TARGET_NR_sched_get_priority_max:
10789         return get_errno(sched_get_priority_max(arg1));
10790     case TARGET_NR_sched_get_priority_min:
10791         return get_errno(sched_get_priority_min(arg1));
10792     case TARGET_NR_sched_rr_get_interval:
10793         {
10794             struct timespec ts;
10795             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10796             if (!is_error(ret)) {
10797                 ret = host_to_target_timespec(arg2, &ts);
10798             }
10799         }
10800         return ret;
10801     case TARGET_NR_nanosleep:
10802         {
10803             struct timespec req, rem;
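                  /* The remaining time is only meaningful if the sleep was
                   * interrupted, so it is copied back to the guest only on
                   * error and only when a rem pointer was supplied.
                   */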
10804             target_to_host_timespec(&req, arg1);
10805             ret = get_errno(safe_nanosleep(&req, &rem));
10806             if (is_error(ret) && arg2) {
10807                 host_to_target_timespec(arg2, &rem);
10808             }
10809         }
10810         return ret;
10811 #ifdef TARGET_NR_query_module
10812     case TARGET_NR_query_module:
10813         goto unimplemented;
10814 #endif
10815 #ifdef TARGET_NR_nfsservctl
10816     case TARGET_NR_nfsservctl:
10817         goto unimplemented;
10818 #endif
10819     case TARGET_NR_prctl:
10820         switch (arg1) {
10821         case PR_GET_PDEATHSIG:
10822         {
10823             int deathsig;
10824             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10825             if (!is_error(ret) && arg2
10826                 && put_user_ual(deathsig, arg2)) {
10827                 return -TARGET_EFAULT;
10828             }
10829             return ret;
10830         }
10831 #ifdef PR_GET_NAME
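              /* PR_GET_NAME/PR_SET_NAME operate on the kernel's fixed 16-byte
               * task comm buffer (TASK_COMM_LEN), hence the constant size used
               * when locking guest memory below.
               */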
10832         case PR_GET_NAME:
10833         {
10834             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10835             if (!name) {
10836                 return -TARGET_EFAULT;
10837             }
10838             ret = get_errno(prctl(arg1, (unsigned long)name,
10839                                   arg3, arg4, arg5));
10840             unlock_user(name, arg2, 16);
10841             return ret;
10842         }
10843         case PR_SET_NAME:
10844         {
10845             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10846             if (!name) {
10847                 return -TARGET_EFAULT;
10848             }
10849             ret = get_errno(prctl(arg1, (unsigned long)name,
10850                                   arg3, arg4, arg5));
10851             unlock_user(name, arg2, 0);
10852             return ret;
10853         }
10854 #endif
10855 #ifdef TARGET_AARCH64
10856         case TARGET_PR_SVE_SET_VL:
10857             /*
10858              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10859              * PR_SVE_VL_INHERIT.  Note the kernel definition
10860              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10861              * even though the current architectural maximum is VQ=16.
10862              */
10863             ret = -TARGET_EINVAL;
10864             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10865                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10866                 CPUARMState *env = cpu_env;
10867                 ARMCPU *cpu = arm_env_get_cpu(env);
10868                 uint32_t vq, old_vq;
10869 
10870                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10871                 vq = MAX(arg2 / 16, 1);
10872                 vq = MIN(vq, cpu->sve_max_vq);
10873 
10874                 if (vq < old_vq) {
10875                     aarch64_sve_narrow_vq(env, vq);
10876                 }
10877                 env->vfp.zcr_el[1] = vq - 1;
10878                 ret = vq * 16;
10879             }
10880             return ret;
10881         case TARGET_PR_SVE_GET_VL:
10882             ret = -TARGET_EINVAL;
10883             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10884                 CPUARMState *env = cpu_env;
10885                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10886             }
10887             return ret;
10888 #endif /* AARCH64 */
10889         case PR_GET_SECCOMP:
10890         case PR_SET_SECCOMP:
10891             /* Disable seccomp to prevent the target from disabling syscalls
10892              * we need. */
10893             return -TARGET_EINVAL;
10894         default:
10895             /* Most prctl options have no pointer arguments */
10896             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10897         }
10898         break;
10899 #ifdef TARGET_NR_arch_prctl
10900     case TARGET_NR_arch_prctl:
10901 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10902         return do_arch_prctl(cpu_env, arg1, arg2);
10903 #else
10904         goto unimplemented;
10905 #endif
10906 #endif
10907 #ifdef TARGET_NR_pread64
10908     case TARGET_NR_pread64:
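              /* On 32-bit ABIs that want 64-bit syscall arguments in aligned
               * register pairs, a padding argument precedes the offset, so the
               * two offset halves arrive one slot later; regpairs_aligned()
               * detects that case and shifts them back into place.
               */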
10909         if (regpairs_aligned(cpu_env, num)) {
10910             arg4 = arg5;
10911             arg5 = arg6;
10912         }
10913         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10914             return -TARGET_EFAULT;
10915         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10916         unlock_user(p, arg2, ret);
10917         return ret;
10918     case TARGET_NR_pwrite64:
10919         if (regpairs_aligned(cpu_env, num)) {
10920             arg4 = arg5;
10921             arg5 = arg6;
10922         }
10923         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10924             return -TARGET_EFAULT;
10925         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10926         unlock_user(p, arg2, 0);
10927         return ret;
10928 #endif
10929     case TARGET_NR_getcwd:
10930         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10931             return -TARGET_EFAULT;
10932         ret = get_errno(sys_getcwd1(p, arg2));
10933         unlock_user(p, arg1, ret);
10934         return ret;
10935     case TARGET_NR_capget:
10936     case TARGET_NR_capset:
10937     {
10938         struct target_user_cap_header *target_header;
10939         struct target_user_cap_data *target_data = NULL;
10940         struct __user_cap_header_struct header;
10941         struct __user_cap_data_struct data[2];
10942         struct __user_cap_data_struct *dataptr = NULL;
10943         int i, target_datalen;
10944         int data_items = 1;
10945 
10946         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10947             return -TARGET_EFAULT;
10948         }
10949         header.version = tswap32(target_header->version);
10950         header.pid = tswap32(target_header->pid);
10951 
10952         if (header.version != _LINUX_CAPABILITY_VERSION) {
10953             /* Versions 2 and up take a pointer to two user_data structs */
10954             data_items = 2;
10955         }
10956 
10957         target_datalen = sizeof(*target_data) * data_items;
10958 
10959         if (arg2) {
10960             if (num == TARGET_NR_capget) {
10961                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10962             } else {
10963                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10964             }
10965             if (!target_data) {
10966                 unlock_user_struct(target_header, arg1, 0);
10967                 return -TARGET_EFAULT;
10968             }
10969 
10970             if (num == TARGET_NR_capset) {
10971                 for (i = 0; i < data_items; i++) {
10972                     data[i].effective = tswap32(target_data[i].effective);
10973                     data[i].permitted = tswap32(target_data[i].permitted);
10974                     data[i].inheritable = tswap32(target_data[i].inheritable);
10975                 }
10976             }
10977 
10978             dataptr = data;
10979         }
10980 
10981         if (num == TARGET_NR_capget) {
10982             ret = get_errno(capget(&header, dataptr));
10983         } else {
10984             ret = get_errno(capset(&header, dataptr));
10985         }
10986 
10987         /* The kernel always updates version for both capget and capset */
10988         target_header->version = tswap32(header.version);
10989         unlock_user_struct(target_header, arg1, 1);
10990 
10991         if (arg2) {
10992             if (num == TARGET_NR_capget) {
10993                 for (i = 0; i < data_items; i++) {
10994                     target_data[i].effective = tswap32(data[i].effective);
10995                     target_data[i].permitted = tswap32(data[i].permitted);
10996                     target_data[i].inheritable = tswap32(data[i].inheritable);
10997                 }
10998                 unlock_user(target_data, arg2, target_datalen);
10999             } else {
11000                 unlock_user(target_data, arg2, 0);
11001             }
11002         }
11003         return ret;
11004     }
11005     case TARGET_NR_sigaltstack:
11006         return do_sigaltstack(arg1, arg2,
11007                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11008 
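          /* Both sendfile variants are forwarded to the host sendfile(): the
           * guest offset (an abi_long for sendfile, a 64-bit value for
           * sendfile64) is read in, updated by the host call and, on success,
           * written back to guest memory.
           */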
11009 #ifdef CONFIG_SENDFILE
11010 #ifdef TARGET_NR_sendfile
11011     case TARGET_NR_sendfile:
11012     {
11013         off_t *offp = NULL;
11014         off_t off;
11015         if (arg3) {
11016             ret = get_user_sal(off, arg3);
11017             if (is_error(ret)) {
11018                 return ret;
11019             }
11020             offp = &off;
11021         }
11022         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11023         if (!is_error(ret) && arg3) {
11024             abi_long ret2 = put_user_sal(off, arg3);
11025             if (is_error(ret2)) {
11026                 ret = ret2;
11027             }
11028         }
11029         return ret;
11030     }
11031 #endif
11032 #ifdef TARGET_NR_sendfile64
11033     case TARGET_NR_sendfile64:
11034     {
11035         off_t *offp = NULL;
11036         off_t off;
11037         if (arg3) {
11038             ret = get_user_s64(off, arg3);
11039             if (is_error(ret)) {
11040                 return ret;
11041             }
11042             offp = &off;
11043         }
11044         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11045         if (!is_error(ret) && arg3) {
11046             abi_long ret2 = put_user_s64(off, arg3);
11047             if (is_error(ret2)) {
11048                 ret = ret2;
11049             }
11050         }
11051         return ret;
11052     }
11053 #endif
11054 #else
11055     case TARGET_NR_sendfile:
11056 #ifdef TARGET_NR_sendfile64
11057     case TARGET_NR_sendfile64:
11058 #endif
11059         goto unimplemented;
11060 #endif
11061 
11062 #ifdef TARGET_NR_getpmsg
11063     case TARGET_NR_getpmsg:
11064         goto unimplemented;
11065 #endif
11066 #ifdef TARGET_NR_putpmsg
11067     case TARGET_NR_putpmsg:
11068         goto unimplemented;
11069 #endif
11070 #ifdef TARGET_NR_vfork
11071     case TARGET_NR_vfork:
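              /* There is no separate vfork path here: the request is funnelled
               * through do_fork() with CLONE_VFORK | CLONE_VM set, together
               * with the target's SIGCHLD value.
               */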
11072         return get_errno(do_fork(cpu_env,
11073                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11074                          0, 0, 0, 0));
11075 #endif
11076 #ifdef TARGET_NR_ugetrlimit
11077     case TARGET_NR_ugetrlimit:
11078     {
11079         struct rlimit rlim;
11080         int resource = target_to_host_resource(arg1);
11081         ret = get_errno(getrlimit(resource, &rlim));
11082         if (!is_error(ret)) {
11083             struct target_rlimit *target_rlim;
11084             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11085                 return -TARGET_EFAULT;
11086             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11087             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11088             unlock_user_struct(target_rlim, arg2, 1);
11089         }
11090         return ret;
11091     }
11092 #endif
11093 #ifdef TARGET_NR_truncate64
11094     case TARGET_NR_truncate64:
11095         if (!(p = lock_user_string(arg1)))
11096             return -TARGET_EFAULT;
11097         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11098         unlock_user(p, arg1, 0);
11099         return ret;
11100 #endif
11101 #ifdef TARGET_NR_ftruncate64
11102     case TARGET_NR_ftruncate64:
11103         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11104 #endif
11105 #ifdef TARGET_NR_stat64
11106     case TARGET_NR_stat64:
11107         if (!(p = lock_user_string(arg1))) {
11108             return -TARGET_EFAULT;
11109         }
11110         ret = get_errno(stat(path(p), &st));
11111         unlock_user(p, arg1, 0);
11112         if (!is_error(ret))
11113             ret = host_to_target_stat64(cpu_env, arg2, &st);
11114         return ret;
11115 #endif
11116 #ifdef TARGET_NR_lstat64
11117     case TARGET_NR_lstat64:
11118         if (!(p = lock_user_string(arg1))) {
11119             return -TARGET_EFAULT;
11120         }
11121         ret = get_errno(lstat(path(p), &st));
11122         unlock_user(p, arg1, 0);
11123         if (!is_error(ret))
11124             ret = host_to_target_stat64(cpu_env, arg2, &st);
11125         return ret;
11126 #endif
11127 #ifdef TARGET_NR_fstat64
11128     case TARGET_NR_fstat64:
11129         ret = get_errno(fstat(arg1, &st));
11130         if (!is_error(ret))
11131             ret = host_to_target_stat64(cpu_env, arg2, &st);
11132         return ret;
11133 #endif
11134 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11135 #ifdef TARGET_NR_fstatat64
11136     case TARGET_NR_fstatat64:
11137 #endif
11138 #ifdef TARGET_NR_newfstatat
11139     case TARGET_NR_newfstatat:
11140 #endif
11141         if (!(p = lock_user_string(arg2))) {
11142             return -TARGET_EFAULT;
11143         }
11144         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11145         unlock_user(p, arg2, 0);
11146         if (!is_error(ret))
11147             ret = host_to_target_stat64(cpu_env, arg3, &st);
11148         return ret;
11149 #endif
11150 #ifdef TARGET_NR_lchown
11151     case TARGET_NR_lchown:
11152         if (!(p = lock_user_string(arg1)))
11153             return -TARGET_EFAULT;
11154         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11155         unlock_user(p, arg1, 0);
11156         return ret;
11157 #endif
11158 #ifdef TARGET_NR_getuid
11159     case TARGET_NR_getuid:
11160         return get_errno(high2lowuid(getuid()));
11161 #endif
11162 #ifdef TARGET_NR_getgid
11163     case TARGET_NR_getgid:
11164         return get_errno(high2lowgid(getgid()));
11165 #endif
11166 #ifdef TARGET_NR_geteuid
11167     case TARGET_NR_geteuid:
11168         return get_errno(high2lowuid(geteuid()));
11169 #endif
11170 #ifdef TARGET_NR_getegid
11171     case TARGET_NR_getegid:
11172         return get_errno(high2lowgid(getegid()));
11173 #endif
11174     case TARGET_NR_setreuid:
11175         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11176     case TARGET_NR_setregid:
11177         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11178     case TARGET_NR_getgroups:
11179         {
11180             int gidsetsize = arg1;
11181             target_id *target_grouplist;
11182             gid_t *grouplist;
11183             int i;
11184 
11185             grouplist = alloca(gidsetsize * sizeof(gid_t));
11186             ret = get_errno(getgroups(gidsetsize, grouplist));
11187             if (gidsetsize == 0)
11188                 return ret;
11189             if (!is_error(ret)) {
11190                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11191                 if (!target_grouplist)
11192                     return -TARGET_EFAULT;
11193                 for (i = 0; i < ret; i++)
11194                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11195                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11196             }
11197         }
11198         return ret;
11199     case TARGET_NR_setgroups:
11200         {
11201             int gidsetsize = arg1;
11202             target_id *target_grouplist;
11203             gid_t *grouplist = NULL;
11204             int i;
11205             if (gidsetsize) {
11206                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11207                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11208                 if (!target_grouplist) {
11209                     ret = -TARGET_EFAULT;
11210                     goto fail;
11211                 }
11212                 for (i = 0; i < gidsetsize; i++) {
11213                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11214                 }
11215                 unlock_user(target_grouplist, arg2, 0);
11216             }
11217             return get_errno(setgroups(gidsetsize, grouplist));
11218         }
11219     case TARGET_NR_fchown:
11220         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11221 #if defined(TARGET_NR_fchownat)
11222     case TARGET_NR_fchownat:
11223         if (!(p = lock_user_string(arg2)))
11224             return -TARGET_EFAULT;
11225         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11226                                  low2highgid(arg4), arg5));
11227         unlock_user(p, arg2, 0);
11228         return ret;
11229 #endif
11230 #ifdef TARGET_NR_setresuid
11231     case TARGET_NR_setresuid:
11232         return get_errno(sys_setresuid(low2highuid(arg1),
11233                                        low2highuid(arg2),
11234                                        low2highuid(arg3)));
11235 #endif
11236 #ifdef TARGET_NR_getresuid
11237     case TARGET_NR_getresuid:
11238         {
11239             uid_t ruid, euid, suid;
11240             ret = get_errno(getresuid(&ruid, &euid, &suid));
11241             if (!is_error(ret)) {
11242                 if (put_user_id(high2lowuid(ruid), arg1)
11243                     || put_user_id(high2lowuid(euid), arg2)
11244                     || put_user_id(high2lowuid(suid), arg3))
11245                     return -TARGET_EFAULT;
11246             }
11247         }
11248         return ret;
11249 #endif
11250 #ifdef TARGET_NR_getresgid
11251     case TARGET_NR_setresgid:
11252         return get_errno(sys_setresgid(low2highgid(arg1),
11253                                        low2highgid(arg2),
11254                                        low2highgid(arg3)));
11255 #endif
11256 #ifdef TARGET_NR_getresgid
11257     case TARGET_NR_getresgid:
11258         {
11259             gid_t rgid, egid, sgid;
11260             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11261             if (!is_error(ret)) {
11262                 if (put_user_id(high2lowgid(rgid), arg1)
11263                     || put_user_id(high2lowgid(egid), arg2)
11264                     || put_user_id(high2lowgid(sgid), arg3))
11265                     return -TARGET_EFAULT;
11266             }
11267         }
11268         return ret;
11269 #endif
11270 #ifdef TARGET_NR_chown
11271     case TARGET_NR_chown:
11272         if (!(p = lock_user_string(arg1)))
11273             return -TARGET_EFAULT;
11274         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11275         unlock_user(p, arg1, 0);
11276         return ret;
11277 #endif
11278     case TARGET_NR_setuid:
11279         return get_errno(sys_setuid(low2highuid(arg1)));
11280     case TARGET_NR_setgid:
11281         return get_errno(sys_setgid(low2highgid(arg1)));
11282     case TARGET_NR_setfsuid:
11283         return get_errno(setfsuid(arg1));
11284     case TARGET_NR_setfsgid:
11285         return get_errno(setfsgid(arg1));
11286 
11287 #ifdef TARGET_NR_lchown32
11288     case TARGET_NR_lchown32:
11289         if (!(p = lock_user_string(arg1)))
11290             return -TARGET_EFAULT;
11291         ret = get_errno(lchown(p, arg2, arg3));
11292         unlock_user(p, arg1, 0);
11293         return ret;
11294 #endif
11295 #ifdef TARGET_NR_getuid32
11296     case TARGET_NR_getuid32:
11297         return get_errno(getuid());
11298 #endif
11299 
11300 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11301    /* Alpha specific */
11302     case TARGET_NR_getxuid:
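              /* Alpha's getx*id calls return a pair of IDs: the real ID is the
               * normal return value (v0) and the effective ID is written to
               * register a4, which is what the ir[IR_A4] store below does.
               */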
11303          {
11304         {
11305             uid_t euid;
11306             euid = geteuid();
11307             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11308         }
11309 #endif
11310 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11311    /* Alpha specific */
11312     case TARGET_NR_getxgid:
11313         {
11314             gid_t egid;
11315             egid = getegid();
11316             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11317         }
11318         return get_errno(getgid());
11319 #endif
11320 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11321     /* Alpha specific */
11322     case TARGET_NR_osf_getsysinfo:
11323         ret = -TARGET_EOPNOTSUPP;
11324         switch (arg1) {
11325           case TARGET_GSI_IEEE_FP_CONTROL:
11326             {
11327                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11328 
11329                 /* Copied from linux ieee_fpcr_to_swcr.  */
11330                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11331                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11332                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11333                                         | SWCR_TRAP_ENABLE_DZE
11334                                         | SWCR_TRAP_ENABLE_OVF);
11335                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11336                                         | SWCR_TRAP_ENABLE_INE);
11337                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11338                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11339 
11340                 if (put_user_u64 (swcr, arg2))
11341                         return -TARGET_EFAULT;
11342                 ret = 0;
11343             }
11344             break;
11345 
11346           /* case GSI_IEEE_STATE_AT_SIGNAL:
11347              -- Not implemented in linux kernel.
11348              case GSI_UACPROC:
11349              -- Retrieves current unaligned access state; not much used.
11350              case GSI_PROC_TYPE:
11351              -- Retrieves implver information; surely not used.
11352              case GSI_GET_HWRPB:
11353              -- Grabs a copy of the HWRPB; surely not used.
11354           */
11355         }
11356         return ret;
11357 #endif
11358 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11359     /* Alpha specific */
11360     case TARGET_NR_osf_setsysinfo:
11361         ret = -TARGET_EOPNOTSUPP;
11362         switch (arg1) {
11363           case TARGET_SSI_IEEE_FP_CONTROL:
11364             {
11365                 uint64_t swcr, fpcr, orig_fpcr;
11366 
11367                 if (get_user_u64 (swcr, arg2)) {
11368                     return -TARGET_EFAULT;
11369                 }
11370                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11371                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11372 
11373                 /* Copied from linux ieee_swcr_to_fpcr.  */
11374                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11375                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11376                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11377                                   | SWCR_TRAP_ENABLE_DZE
11378                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11379                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11380                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11381                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11382                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11383 
11384                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11385                 ret = 0;
11386             }
11387             break;
11388 
11389           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11390             {
11391                 uint64_t exc, fpcr, orig_fpcr;
11392                 int si_code;
11393 
11394                 if (get_user_u64(exc, arg2)) {
11395                     return -TARGET_EFAULT;
11396                 }
11397 
11398                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11399 
11400                 /* We only add to the exception status here.  */
11401                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11402 
11403                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11404                 ret = 0;
11405 
11406                 /* Old exceptions are not signaled.  */
11407                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11408 
11409                 /* If any exceptions were set by this call
11410                    and are unmasked, send a signal.  */
11411                 si_code = 0;
11412                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11413                     si_code = TARGET_FPE_FLTRES;
11414                 }
11415                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11416                     si_code = TARGET_FPE_FLTUND;
11417                 }
11418                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11419                     si_code = TARGET_FPE_FLTOVF;
11420                 }
11421                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11422                     si_code = TARGET_FPE_FLTDIV;
11423                 }
11424                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11425                     si_code = TARGET_FPE_FLTINV;
11426                 }
11427                 if (si_code != 0) {
11428                     target_siginfo_t info;
11429                     info.si_signo = SIGFPE;
11430                     info.si_errno = 0;
11431                     info.si_code = si_code;
11432                     info._sifields._sigfault._addr
11433                         = ((CPUArchState *)cpu_env)->pc;
11434                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11435                                  QEMU_SI_FAULT, &info);
11436                 }
11437             }
11438             break;
11439 
11440           /* case SSI_NVPAIRS:
11441              -- Used with SSIN_UACPROC to enable unaligned accesses.
11442              case SSI_IEEE_STATE_AT_SIGNAL:
11443              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11444              -- Not implemented in linux kernel
11445           */
11446         }
11447         return ret;
11448 #endif
11449 #ifdef TARGET_NR_osf_sigprocmask
11450     /* Alpha specific.  */
11451     case TARGET_NR_osf_sigprocmask:
11452         {
11453             abi_ulong mask;
11454             int how;
11455             sigset_t set, oldset;
11456 
11457             switch (arg1) {
11458             case TARGET_SIG_BLOCK:
11459                 how = SIG_BLOCK;
11460                 break;
11461             case TARGET_SIG_UNBLOCK:
11462                 how = SIG_UNBLOCK;
11463                 break;
11464             case TARGET_SIG_SETMASK:
11465                 how = SIG_SETMASK;
11466                 break;
11467             default:
11468                 ret = -TARGET_EINVAL;
11469                 goto fail;
11470             }
11471             mask = arg2;
11472             target_to_host_old_sigset(&set, &mask);
11473             ret = do_sigprocmask(how, &set, &oldset);
11474             if (!ret) {
11475                 host_to_target_old_sigset(&mask, &oldset);
11476                 ret = mask;
11477             }
11478         }
11479         return ret;
11480 #endif
11481 
11482 #ifdef TARGET_NR_getgid32
11483     case TARGET_NR_getgid32:
11484         return get_errno(getgid());
11485 #endif
11486 #ifdef TARGET_NR_geteuid32
11487     case TARGET_NR_geteuid32:
11488         return get_errno(geteuid());
11489 #endif
11490 #ifdef TARGET_NR_getegid32
11491     case TARGET_NR_getegid32:
11492         return get_errno(getegid());
11493 #endif
11494 #ifdef TARGET_NR_setreuid32
11495     case TARGET_NR_setreuid32:
11496         return get_errno(setreuid(arg1, arg2));
11497 #endif
11498 #ifdef TARGET_NR_setregid32
11499     case TARGET_NR_setregid32:
11500         return get_errno(setregid(arg1, arg2));
11501 #endif
11502 #ifdef TARGET_NR_getgroups32
11503     case TARGET_NR_getgroups32:
11504         {
11505             int gidsetsize = arg1;
11506             uint32_t *target_grouplist;
11507             gid_t *grouplist;
11508             int i;
11509 
11510             grouplist = alloca(gidsetsize * sizeof(gid_t));
11511             ret = get_errno(getgroups(gidsetsize, grouplist));
11512             if (gidsetsize == 0)
11513                 return ret;
11514             if (!is_error(ret)) {
11515                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11516                 if (!target_grouplist) {
11517                     ret = -TARGET_EFAULT;
11518                     goto fail;
11519                 }
11520                 for (i = 0; i < ret; i++)
11521                     target_grouplist[i] = tswap32(grouplist[i]);
11522                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11523             }
11524         }
11525         return ret;
11526 #endif
11527 #ifdef TARGET_NR_setgroups32
11528     case TARGET_NR_setgroups32:
11529         {
11530             int gidsetsize = arg1;
11531             uint32_t *target_grouplist;
11532             gid_t *grouplist;
11533             int i;
11534 
11535             grouplist = alloca(gidsetsize * sizeof(gid_t));
11536             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11537             if (!target_grouplist) {
11538                 ret = -TARGET_EFAULT;
11539                 goto fail;
11540             }
11541             for (i = 0; i < gidsetsize; i++)
11542                 grouplist[i] = tswap32(target_grouplist[i]);
11543             unlock_user(target_grouplist, arg2, 0);
11544             return get_errno(setgroups(gidsetsize, grouplist));
11545         }
11546 #endif
11547 #ifdef TARGET_NR_fchown32
11548     case TARGET_NR_fchown32:
11549         return get_errno(fchown(arg1, arg2, arg3));
11550 #endif
11551 #ifdef TARGET_NR_setresuid32
11552     case TARGET_NR_setresuid32:
11553         return get_errno(sys_setresuid(arg1, arg2, arg3));
11554 #endif
11555 #ifdef TARGET_NR_getresuid32
11556     case TARGET_NR_getresuid32:
11557         {
11558             uid_t ruid, euid, suid;
11559             ret = get_errno(getresuid(&ruid, &euid, &suid));
11560             if (!is_error(ret)) {
11561                 if (put_user_u32(ruid, arg1)
11562                     || put_user_u32(euid, arg2)
11563                     || put_user_u32(suid, arg3))
11564                     return -TARGET_EFAULT;
11565             }
11566         }
11567         return ret;
11568 #endif
11569 #ifdef TARGET_NR_setresgid32
11570     case TARGET_NR_setresgid32:
11571         return get_errno(sys_setresgid(arg1, arg2, arg3));
11572 #endif
11573 #ifdef TARGET_NR_getresgid32
11574     case TARGET_NR_getresgid32:
11575         {
11576             gid_t rgid, egid, sgid;
11577             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11578             if (!is_error(ret)) {
11579                 if (put_user_u32(rgid, arg1)
11580                     || put_user_u32(egid, arg2)
11581                     || put_user_u32(sgid, arg3))
11582                     return -TARGET_EFAULT;
11583             }
11584         }
11585         return ret;
11586 #endif
11587 #ifdef TARGET_NR_chown32
11588     case TARGET_NR_chown32:
11589         if (!(p = lock_user_string(arg1)))
11590             return -TARGET_EFAULT;
11591         ret = get_errno(chown(p, arg2, arg3));
11592         unlock_user(p, arg1, 0);
11593         return ret;
11594 #endif
11595 #ifdef TARGET_NR_setuid32
11596     case TARGET_NR_setuid32:
11597         return get_errno(sys_setuid(arg1));
11598 #endif
11599 #ifdef TARGET_NR_setgid32
11600     case TARGET_NR_setgid32:
11601         return get_errno(sys_setgid(arg1));
11602 #endif
11603 #ifdef TARGET_NR_setfsuid32
11604     case TARGET_NR_setfsuid32:
11605         return get_errno(setfsuid(arg1));
11606 #endif
11607 #ifdef TARGET_NR_setfsgid32
11608     case TARGET_NR_setfsgid32:
11609         return get_errno(setfsgid(arg1));
11610 #endif
11611 
11612     case TARGET_NR_pivot_root:
11613         goto unimplemented;
11614 #ifdef TARGET_NR_mincore
11615     case TARGET_NR_mincore:
11616         {
11617             void *a;
11618             ret = -TARGET_ENOMEM;
11619             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11620             if (!a) {
11621                 goto fail;
11622             }
11623             ret = -TARGET_EFAULT;
11624             p = lock_user_string(arg3);
11625             if (!p) {
11626                 goto mincore_fail;
11627             }
11628             ret = get_errno(mincore(a, arg2, p));
11629             unlock_user(p, arg3, ret);
11630             mincore_fail:
11631             unlock_user(a, arg1, 0);
11632         }
11633         return ret;
11634 #endif
11635 #ifdef TARGET_NR_arm_fadvise64_64
11636     case TARGET_NR_arm_fadvise64_64:
11637         /* arm_fadvise64_64 looks like fadvise64_64 but
11638          * with different argument order: fd, advice, offset, len
11639          * rather than the usual fd, offset, len, advice.
11640          * Note that offset and len are both 64-bit so appear as
11641          * pairs of 32-bit registers.
11642          */
11643         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11644                             target_offset64(arg5, arg6), arg2);
11645         return -host_to_target_errno(ret);
11646 #endif
11647 
11648 #if TARGET_ABI_BITS == 32
11649 
11650 #ifdef TARGET_NR_fadvise64_64
11651     case TARGET_NR_fadvise64_64:
11652 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11653         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11654         ret = arg2;
11655         arg2 = arg3;
11656         arg3 = arg4;
11657         arg4 = arg5;
11658         arg5 = arg6;
11659         arg6 = ret;
11660 #else
11661         /* 6 args: fd, offset (high, low), len (high, low), advice */
11662         if (regpairs_aligned(cpu_env, num)) {
11663             /* offset is in (3,4), len in (5,6) and advice in 7 */
11664             arg2 = arg3;
11665             arg3 = arg4;
11666             arg4 = arg5;
11667             arg5 = arg6;
11668             arg6 = arg7;
11669         }
11670 #endif
11671         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11672                             target_offset64(arg4, arg5), arg6);
11673         return -host_to_target_errno(ret);
11674 #endif
11675 
11676 #ifdef TARGET_NR_fadvise64
11677     case TARGET_NR_fadvise64:
11678         /* 5 args: fd, offset (high, low), len, advice */
11679         if (regpairs_aligned(cpu_env, num)) {
11680             /* offset is in (3,4), len in 5 and advice in 6 */
11681             arg2 = arg3;
11682             arg3 = arg4;
11683             arg4 = arg5;
11684             arg5 = arg6;
11685         }
11686         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11687         return -host_to_target_errno(ret);
11688 #endif
11689 
11690 #else /* not a 32-bit ABI */
11691 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11692 #ifdef TARGET_NR_fadvise64_64
11693     case TARGET_NR_fadvise64_64:
11694 #endif
11695 #ifdef TARGET_NR_fadvise64
11696     case TARGET_NR_fadvise64:
11697 #endif
11698 #ifdef TARGET_S390X
11699         switch (arg4) {
11700         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11701         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11702         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11703         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11704         default: break;
11705         }
11706 #endif
11707         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11708 #endif
11709 #endif /* end of 64-bit ABI fadvise handling */
11710 
11711 #ifdef TARGET_NR_madvise
11712     case TARGET_NR_madvise:
11713         /* A straight passthrough may not be safe because qemu sometimes
11714            turns private file-backed mappings into anonymous mappings.
11715            This will break MADV_DONTNEED.
11716            This is a hint, so ignoring and returning success is ok.  */
11717         return 0;
11718 #endif
11719 #if TARGET_ABI_BITS == 32
11720     case TARGET_NR_fcntl64:
11721     {
11722         int cmd;
11723         struct flock64 fl;
11724         from_flock64_fn *copyfrom = copy_from_user_flock64;
11725         to_flock64_fn *copyto = copy_to_user_flock64;
11726 
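              /* The ARM OABI lays out struct flock64 differently from EABI
               * (64-bit members are only 4-byte aligned there), so non-EABI
               * guests need their own flock64 copy helpers.
               */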
11727 #ifdef TARGET_ARM
11728         if (!((CPUARMState *)cpu_env)->eabi) {
11729             copyfrom = copy_from_user_oabi_flock64;
11730             copyto = copy_to_user_oabi_flock64;
11731         }
11732 #endif
11733 
11734         cmd = target_to_host_fcntl_cmd(arg2);
11735         if (cmd == -TARGET_EINVAL) {
11736             return cmd;
11737         }
11738 
11739         switch (arg2) {
11740         case TARGET_F_GETLK64:
11741             ret = copyfrom(&fl, arg3);
11742             if (ret) {
11743                 break;
11744             }
11745             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11746             if (ret == 0) {
11747                 ret = copyto(arg3, &fl);
11748             }
11749             break;
11750 
11751         case TARGET_F_SETLK64:
11752         case TARGET_F_SETLKW64:
11753             ret = copyfrom(&fl, arg3);
11754             if (ret) {
11755                 break;
11756             }
11757             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11758             break;
11759         default:
11760             ret = do_fcntl(arg1, arg2, arg3);
11761             break;
11762         }
11763         return ret;
11764     }
11765 #endif
11766 #ifdef TARGET_NR_cacheflush
11767     case TARGET_NR_cacheflush:
11768         /* self-modifying code is handled automatically, so nothing needed */
11769         return 0;
11770 #endif
11771 #ifdef TARGET_NR_security
11772     case TARGET_NR_security:
11773         goto unimplemented;
11774 #endif
11775 #ifdef TARGET_NR_getpagesize
11776     case TARGET_NR_getpagesize:
11777         return TARGET_PAGE_SIZE;
11778 #endif
11779     case TARGET_NR_gettid:
11780         return get_errno(gettid());
11781 #ifdef TARGET_NR_readahead
11782     case TARGET_NR_readahead:
11783 #if TARGET_ABI_BITS == 32
11784         if (regpairs_aligned(cpu_env, num)) {
11785             arg2 = arg3;
11786             arg3 = arg4;
11787             arg4 = arg5;
11788         }
11789         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11790 #else
11791         ret = get_errno(readahead(arg1, arg2, arg3));
11792 #endif
11793         return ret;
11794 #endif
11795 #ifdef CONFIG_ATTR
11796 #ifdef TARGET_NR_setxattr
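          /* For listxattr/llistxattr a zero buffer address from the guest is a
           * size query, so the host call is made with a NULL buffer and simply
           * reports how much space the attribute list needs.
           */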
11797     case TARGET_NR_listxattr:
11798     case TARGET_NR_llistxattr:
11799     {
11800         void *p, *b = 0;
11801         if (arg2) {
11802             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11803             if (!b) {
11804                 return -TARGET_EFAULT;
11805             }
11806         }
11807         p = lock_user_string(arg1);
11808         if (p) {
11809             if (num == TARGET_NR_listxattr) {
11810                 ret = get_errno(listxattr(p, b, arg3));
11811             } else {
11812                 ret = get_errno(llistxattr(p, b, arg3));
11813             }
11814         } else {
11815             ret = -TARGET_EFAULT;
11816         }
11817         unlock_user(p, arg1, 0);
11818         unlock_user(b, arg2, arg3);
11819         return ret;
11820     }
11821     case TARGET_NR_flistxattr:
11822     {
11823         void *b = 0;
11824         if (arg2) {
11825             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11826             if (!b) {
11827                 return -TARGET_EFAULT;
11828             }
11829         }
11830         ret = get_errno(flistxattr(arg1, b, arg3));
11831         unlock_user(b, arg2, arg3);
11832         return ret;
11833     }
11834     case TARGET_NR_setxattr:
11835     case TARGET_NR_lsetxattr:
11836         {
11837             void *p, *n, *v = 0;
11838             if (arg3) {
11839                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11840                 if (!v) {
11841                     return -TARGET_EFAULT;
11842                 }
11843             }
11844             p = lock_user_string(arg1);
11845             n = lock_user_string(arg2);
11846             if (p && n) {
11847                 if (num == TARGET_NR_setxattr) {
11848                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11849                 } else {
11850                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11851                 }
11852             } else {
11853                 ret = -TARGET_EFAULT;
11854             }
11855             unlock_user(p, arg1, 0);
11856             unlock_user(n, arg2, 0);
11857             unlock_user(v, arg3, 0);
11858         }
11859         return ret;
11860     case TARGET_NR_fsetxattr:
11861         {
11862             void *n, *v = 0;
11863             if (arg3) {
11864                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11865                 if (!v) {
11866                     return -TARGET_EFAULT;
11867                 }
11868             }
11869             n = lock_user_string(arg2);
11870             if (n) {
11871                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11872             } else {
11873                 ret = -TARGET_EFAULT;
11874             }
11875             unlock_user(n, arg2, 0);
11876             unlock_user(v, arg3, 0);
11877         }
11878         return ret;
11879     case TARGET_NR_getxattr:
11880     case TARGET_NR_lgetxattr:
11881         {
11882             void *p, *n, *v = 0;
11883             if (arg3) {
11884                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11885                 if (!v) {
11886                     return -TARGET_EFAULT;
11887                 }
11888             }
11889             p = lock_user_string(arg1);
11890             n = lock_user_string(arg2);
11891             if (p && n) {
11892                 if (num == TARGET_NR_getxattr) {
11893                     ret = get_errno(getxattr(p, n, v, arg4));
11894                 } else {
11895                     ret = get_errno(lgetxattr(p, n, v, arg4));
11896                 }
11897             } else {
11898                 ret = -TARGET_EFAULT;
11899             }
11900             unlock_user(p, arg1, 0);
11901             unlock_user(n, arg2, 0);
11902             unlock_user(v, arg3, arg4);
11903         }
11904         return ret;
11905     case TARGET_NR_fgetxattr:
11906         {
11907             void *n, *v = 0;
11908             if (arg3) {
11909                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11910                 if (!v) {
11911                     return -TARGET_EFAULT;
11912                 }
11913             }
11914             n = lock_user_string(arg2);
11915             if (n) {
11916                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11917             } else {
11918                 ret = -TARGET_EFAULT;
11919             }
11920             unlock_user(n, arg2, 0);
11921             unlock_user(v, arg3, arg4);
11922         }
11923         return ret;
11924     case TARGET_NR_removexattr:
11925     case TARGET_NR_lremovexattr:
11926         {
11927             void *p, *n;
11928             p = lock_user_string(arg1);
11929             n = lock_user_string(arg2);
11930             if (p && n) {
11931                 if (num == TARGET_NR_removexattr) {
11932                     ret = get_errno(removexattr(p, n));
11933                 } else {
11934                     ret = get_errno(lremovexattr(p, n));
11935                 }
11936             } else {
11937                 ret = -TARGET_EFAULT;
11938             }
11939             unlock_user(p, arg1, 0);
11940             unlock_user(n, arg2, 0);
11941         }
11942         return ret;
11943     case TARGET_NR_fremovexattr:
11944         {
11945             void *n;
11946             n = lock_user_string(arg2);
11947             if (n) {
11948                 ret = get_errno(fremovexattr(arg1, n));
11949             } else {
11950                 ret = -TARGET_EFAULT;
11951             }
11952             unlock_user(n, arg2, 0);
11953         }
11954         return ret;
11955 #endif
11956 #endif /* CONFIG_ATTR */
11957 #ifdef TARGET_NR_set_thread_area
11958     case TARGET_NR_set_thread_area:
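            /* The thread-local storage pointer lives somewhere different on
             * each target: the CP0 UserLocal register on MIPS, the PID special
             * register on CRIS, a GDT entry on 32-bit x86 and the TaskState
             * for m68k.
             */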
11959 #if defined(TARGET_MIPS)
11960       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11961       return 0;
11962 #elif defined(TARGET_CRIS)
11963       if (arg1 & 0xff)
11964           ret = -TARGET_EINVAL;
11965       else {
11966           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11967           ret = 0;
11968       }
11969       return ret;
11970 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11971       return do_set_thread_area(cpu_env, arg1);
11972 #elif defined(TARGET_M68K)
11973       {
11974           TaskState *ts = cpu->opaque;
11975           ts->tp_value = arg1;
11976           return 0;
11977       }
11978 #else
11979       return -TARGET_ENOSYS;
11980 #endif
11981 #endif
11982 #ifdef TARGET_NR_get_thread_area
11983     case TARGET_NR_get_thread_area:
11984 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11985         return do_get_thread_area(cpu_env, arg1);
11986 #elif defined(TARGET_M68K)
11987         {
11988             TaskState *ts = cpu->opaque;
11989             return ts->tp_value;
11990         }
11991 #else
11992         return -TARGET_ENOSYS;
11993 #endif
11994 #endif
11995 #ifdef TARGET_NR_getdomainname
11996     case TARGET_NR_getdomainname:
11997         return -TARGET_ENOSYS;
11998 #endif
11999 
12000 #ifdef TARGET_NR_clock_settime
12001     case TARGET_NR_clock_settime:
12002     {
12003         struct timespec ts;
12004 
12005         ret = target_to_host_timespec(&ts, arg2);
12006         if (!is_error(ret)) {
12007             ret = get_errno(clock_settime(arg1, &ts));
12008         }
12009         return ret;
12010     }
12011 #endif
12012 #ifdef TARGET_NR_clock_gettime
12013     case TARGET_NR_clock_gettime:
12014     {
12015         struct timespec ts;
12016         ret = get_errno(clock_gettime(arg1, &ts));
12017         if (!is_error(ret)) {
12018             ret = host_to_target_timespec(arg2, &ts);
12019         }
12020         return ret;
12021     }
12022 #endif
12023 #ifdef TARGET_NR_clock_getres
12024     case TARGET_NR_clock_getres:
12025     {
12026         struct timespec ts;
12027         ret = get_errno(clock_getres(arg1, &ts));
12028         if (!is_error(ret)) {
12029             host_to_target_timespec(arg2, &ts);
12030         }
12031         return ret;
12032     }
12033 #endif
12034 #ifdef TARGET_NR_clock_nanosleep
12035     case TARGET_NR_clock_nanosleep:
12036     {
12037         struct timespec ts;
12038         target_to_host_timespec(&ts, arg3);
12039         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12040                                              &ts, arg4 ? &ts : NULL));
12041         if (arg4)
12042             host_to_target_timespec(arg4, &ts);
12043 
12044 #if defined(TARGET_PPC)
12045         /* clock_nanosleep is odd in that it returns positive errno values.
12046          * On PPC, CR0 bit 3 should be set in such a situation. */
12047         if (ret && ret != -TARGET_ERESTARTSYS) {
12048             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12049         }
12050 #endif
12051         return ret;
12052     }
12053 #endif
12054 
12055 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12056     case TARGET_NR_set_tid_address:
12057         return get_errno(set_tid_address((int *)g2h(arg1)));
12058 #endif
12059 
12060     case TARGET_NR_tkill:
12061         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12062 
12063     case TARGET_NR_tgkill:
12064         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12065                          target_to_host_signal(arg3)));
12066 
12067 #ifdef TARGET_NR_set_robust_list
12068     case TARGET_NR_set_robust_list:
12069     case TARGET_NR_get_robust_list:
12070         /* The ABI for supporting robust futexes has userspace pass
12071          * the kernel a pointer to a linked list which is updated by
12072          * userspace after the syscall; the list is walked by the kernel
12073          * when the thread exits. Since the linked list in QEMU guest
12074          * memory isn't a valid linked list for the host and we have
12075          * no way to reliably intercept the thread-death event, we can't
12076          * support these. Silently return ENOSYS so that guest userspace
12077          * falls back to a non-robust futex implementation (which should
12078          * be OK except in the corner case of the guest crashing while
12079          * holding a mutex that is shared with another process via
12080          * shared memory).
12081          */
12082         return -TARGET_ENOSYS;
12083 #endif
12084 
12085 #if defined(TARGET_NR_utimensat)
12086     case TARGET_NR_utimensat:
12087         {
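                  /* ts[0] is the access time and ts[1] the modification time;
                   * a NULL times pointer from the guest means "set both to the
                   * current time" and is forwarded to the host as a NULL tsp.
                   */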
12088             struct timespec *tsp, ts[2];
12089             if (!arg3) {
12090                 tsp = NULL;
12091             } else {
12092                 target_to_host_timespec(ts, arg3);
12093                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12094                 tsp = ts;
12095             }
12096             if (!arg2) {
12097                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12098             } else {
12099                 if (!(p = lock_user_string(arg2))) {
12100                     ret = -TARGET_EFAULT;
12101                     goto fail;
12102                 }
12103                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12104                 unlock_user(p, arg2, 0);
12105             }
12106         }
12107         return ret;
12108 #endif
12109     case TARGET_NR_futex:
12110         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
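          /* inotify descriptors get an fd translator registered so that the
           * struct inotify_event records the guest later reads from them are
           * converted to target byte order.
           */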
12111 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12112     case TARGET_NR_inotify_init:
12113         ret = get_errno(sys_inotify_init());
12114         if (ret >= 0) {
12115             fd_trans_register(ret, &target_inotify_trans);
12116         }
12117         return ret;
12118 #endif
12119 #ifdef CONFIG_INOTIFY1
12120 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12121     case TARGET_NR_inotify_init1:
12122         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12123                                           fcntl_flags_tbl)));
12124         if (ret >= 0) {
12125             fd_trans_register(ret, &target_inotify_trans);
12126         }
12127         return ret;
12128 #endif
12129 #endif
12130 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12131     case TARGET_NR_inotify_add_watch:
12132         p = lock_user_string(arg2);
12133         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12134         unlock_user(p, arg2, 0);
12135         return ret;
12136 #endif
12137 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12138     case TARGET_NR_inotify_rm_watch:
12139         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12140 #endif
12141 
12142 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12143     case TARGET_NR_mq_open:
12144         {
12145             struct mq_attr posix_mq_attr;
12146             struct mq_attr *pposix_mq_attr;
12147             int host_flags;
12148 
12149             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12150             pposix_mq_attr = NULL;
12151             if (arg4) {
12152                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12153                     return -TARGET_EFAULT;
12154                 }
12155                 pposix_mq_attr = &posix_mq_attr;
12156             }
12157             p = lock_user_string(arg1 - 1);
12158             if (!p) {
12159                 return -TARGET_EFAULT;
12160             }
12161             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12162             unlock_user (p, arg1, 0);
12163         }
12164         return ret;
12165 
12166     case TARGET_NR_mq_unlink:
12167         p = lock_user_string(arg1 - 1);
12168         if (!p) {
12169             return -TARGET_EFAULT;
12170         }
12171         ret = get_errno(mq_unlink(p));
12172         unlock_user (p, arg1, 0);
12173         return ret;
12174 
12175     case TARGET_NR_mq_timedsend:
12176         {
12177             struct timespec ts;
12178 
12179             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12180             if (arg5 != 0) {
12181                 target_to_host_timespec(&ts, arg5);
12182                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12183                 host_to_target_timespec(arg5, &ts);
12184             } else {
12185                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12186             }
12187             unlock_user (p, arg2, arg3);
12188         }
12189         return ret;
12190 
12191     case TARGET_NR_mq_timedreceive:
12192         {
12193             struct timespec ts;
12194             unsigned int prio;
12195 
12196             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12197             if (arg5 != 0) {
12198                 target_to_host_timespec(&ts, arg5);
12199                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12200                                                      &prio, &ts));
12201                 host_to_target_timespec(arg5, &ts);
12202             } else {
12203                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12204                                                      &prio, NULL));
12205             }
12206             unlock_user (p, arg2, arg3);
12207             if (arg4 != 0)
12208                 put_user_u32(prio, arg4);
12209         }
12210         return ret;
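    /*
     * Guest-side sketch for the two timed cases above (illustrative only;
     * message and timeout are arbitrary): the absolute timeout is converted
     * with target_to_host_timespec(), the message buffer is accessed through
     * lock_user()/unlock_user(), and the priority returned by
     * mq_timedreceive() is written back with put_user_u32().
     */
#if 0
    /* needs <mqueue.h> and <time.h>; link with -lrt */
    static void example_guest_mq_timed(mqd_t mq)
    {
        char msg[128];                  /* must be >= the queue's mq_msgsize */
        unsigned int prio;
        struct timespec abs_timeout;

        clock_gettime(CLOCK_REALTIME, &abs_timeout);
        abs_timeout.tv_sec += 5;        /* give up five seconds from now */
        mq_timedsend(mq, "ping", 4, 1, &abs_timeout);
        mq_timedreceive(mq, msg, sizeof(msg), &prio, &abs_timeout);
    }
#endif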
12211 
12212     /* Not implemented for now... */
12213 /*     case TARGET_NR_mq_notify: */
12214 /*         break; */
12215 
12216     case TARGET_NR_mq_getsetattr:
12217         {
12218             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12219             ret = 0;
12220             if (arg2 != 0) {
12221                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12222                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12223                                            &posix_mq_attr_out));
12224             } else if (arg3 != 0) {
12225                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12226             }
12227             if (ret == 0 && arg3 != 0) {
12228                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12229             }
12230         }
12231         return ret;
12232 #endif
12233 
12234 #ifdef CONFIG_SPLICE
12235 #ifdef TARGET_NR_tee
12236     case TARGET_NR_tee:
12237         {
12238             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12239         }
12240         return ret;
12241 #endif
12242 #ifdef TARGET_NR_splice
12243     case TARGET_NR_splice:
12244         {
12245             loff_t loff_in, loff_out;
12246             loff_t *ploff_in = NULL, *ploff_out = NULL;
12247             if (arg2) {
12248                 if (get_user_u64(loff_in, arg2)) {
12249                     return -TARGET_EFAULT;
12250                 }
12251                 ploff_in = &loff_in;
12252             }
12253             if (arg4) {
12254                 if (get_user_u64(loff_out, arg4)) {
12255                     return -TARGET_EFAULT;
12256                 }
12257                 ploff_out = &loff_out;
12258             }
12259             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12260             if (arg2) {
12261                 if (put_user_u64(loff_in, arg2)) {
12262                     return -TARGET_EFAULT;
12263                 }
12264             }
12265             if (arg4) {
12266                 if (put_user_u64(loff_out, arg4)) {
12267                     return -TARGET_EFAULT;
12268                 }
12269             }
12270         }
12271         return ret;
12272 #endif
12273 #ifdef TARGET_NR_vmsplice
12274     case TARGET_NR_vmsplice:
12275         {
12276             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12277             if (vec != NULL) {
12278                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12279                 unlock_iovec(vec, arg2, arg3, 0);
12280             } else {
12281                 ret = -host_to_target_errno(errno);
12282             }
12283         }
12284         return ret;
12285 #endif
12286 #endif /* CONFIG_SPLICE */
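    /*
     * Guest-side sketch for the splice family handled above (illustrative
     * only; the fds and sizes are arbitrary): the optional 64-bit file
     * offsets are copied in with get_user_u64() and written back with
     * put_user_u64(), which corresponds to the kernel updating *off_in /
     * *off_out in the sketch below.
     */
#if 0
    /* needs _GNU_SOURCE, <fcntl.h>, <sys/types.h> and <unistd.h> */
    static void example_guest_splice(int file_fd, int sock_fd)
    {
        int pipefd[2];
        loff_t off = 0;

        if (pipe(pipefd) == 0) {
            /* file -> pipe -> socket without a round trip through userspace */
            splice(file_fd, &off, pipefd[1], NULL, 4096, SPLICE_F_MOVE);
            splice(pipefd[0], NULL, sock_fd, NULL, 4096, SPLICE_F_MOVE);
            close(pipefd[0]);
            close(pipefd[1]);
        }
    }
#endif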
12287 #ifdef CONFIG_EVENTFD
12288 #if defined(TARGET_NR_eventfd)
12289     case TARGET_NR_eventfd:
12290         ret = get_errno(eventfd(arg1, 0));
12291         if (ret >= 0) {
12292             fd_trans_register(ret, &target_eventfd_trans);
12293         }
12294         return ret;
12295 #endif
12296 #if defined(TARGET_NR_eventfd2)
12297     case TARGET_NR_eventfd2:
12298     {
12299         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12300         if (arg2 & TARGET_O_NONBLOCK) {
12301             host_flags |= O_NONBLOCK;
12302         }
12303         if (arg2 & TARGET_O_CLOEXEC) {
12304             host_flags |= O_CLOEXEC;
12305         }
12306         ret = get_errno(eventfd(arg1, host_flags));
12307         if (ret >= 0) {
12308             fd_trans_register(ret, &target_eventfd_trans);
12309         }
12310         return ret;
12311     }
12312 #endif
12313 #endif /* CONFIG_EVENTFD  */
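    /*
     * Guest-side sketch for the eventfd cases above (illustrative only):
     * TARGET_O_NONBLOCK/TARGET_O_CLOEXEC are remapped to the host O_* flags,
     * which equal EFD_NONBLOCK/EFD_CLOEXEC on Linux, and the registered fd
     * translator keeps the 8-byte counter in the guest's byte order.
     */
#if 0
    /* needs <sys/eventfd.h>, <stdint.h> and <unistd.h> */
    static void example_guest_eventfd(void)
    {
        uint64_t val = 1;
        int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);

        write(efd, &val, sizeof(val));      /* add 1 to the counter */
        read(efd, &val, sizeof(val));       /* fetch and reset the counter */
        close(efd);
    }
#endif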
12314 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12315     case TARGET_NR_fallocate:
12316 #if TARGET_ABI_BITS == 32
12317         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12318                                   target_offset64(arg5, arg6)));
12319 #else
12320         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12321 #endif
12322         return ret;
12323 #endif
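    /*
     * On 32-bit ABIs the 64-bit offset and length above arrive split across
     * two registers each and are recombined with target_offset64().  A rough
     * sketch of that idea follows (the actual word order is target-dependent;
     * this assumes the low word comes first), together with the guest call
     * that ends up in this case.
     */
#if 0
    static uint64_t example_offset64(uint32_t low_word, uint32_t high_word)
    {
        return ((uint64_t)high_word << 32) | low_word;
    }

    /* needs _GNU_SOURCE and <fcntl.h> */
    static void example_guest_fallocate(int fd)
    {
        fallocate(fd, 0, 0, 1024 * 1024);   /* preallocate 1 MiB at offset 0 */
    }
#endif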
12324 #if defined(CONFIG_SYNC_FILE_RANGE)
12325 #if defined(TARGET_NR_sync_file_range)
12326     case TARGET_NR_sync_file_range:
12327 #if TARGET_ABI_BITS == 32
12328 #if defined(TARGET_MIPS)
12329         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12330                                         target_offset64(arg5, arg6), arg7));
12331 #else
12332         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12333                                         target_offset64(arg4, arg5), arg6));
12334 #endif /* !TARGET_MIPS */
12335 #else
12336         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12337 #endif
12338         return ret;
12339 #endif
12340 #if defined(TARGET_NR_sync_file_range2)
12341     case TARGET_NR_sync_file_range2:
12342         /* This is like sync_file_range but the arguments are reordered */
12343 #if TARGET_ABI_BITS == 32
12344         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12345                                         target_offset64(arg5, arg6), arg2));
12346 #else
12347         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12348 #endif
12349         return ret;
12350 #endif
12351 #endif
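    /*
     * Guest-side sketch for the two cases above (illustrative only).  The
     * sync_file_range2 variant exists on targets whose ABIs want 64-bit
     * values in aligned register pairs, so the flags are passed as the
     * second argument and the handler just puts them back in place; the
     * MIPS variant above similarly skips arg2 as register-pair padding.
     */
#if 0
    /* needs _GNU_SOURCE and <fcntl.h> */
    static void example_guest_sync_file_range(int fd)
    {
        /* start write-out of the first 64 KiB and wait for it to finish */
        sync_file_range(fd, 0, 64 * 1024,
                        SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER);
    }
#endif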
12352 #if defined(TARGET_NR_signalfd4)
12353     case TARGET_NR_signalfd4:
12354         return do_signalfd4(arg1, arg2, arg4);
12355 #endif
12356 #if defined(TARGET_NR_signalfd)
12357     case TARGET_NR_signalfd:
12358         return do_signalfd4(arg1, arg2, 0);
12359 #endif
12360 #if defined(CONFIG_EPOLL)
12361 #if defined(TARGET_NR_epoll_create)
12362     case TARGET_NR_epoll_create:
12363         return get_errno(epoll_create(arg1));
12364 #endif
12365 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12366     case TARGET_NR_epoll_create1:
12367         return get_errno(epoll_create1(arg1));
12368 #endif
12369 #if defined(TARGET_NR_epoll_ctl)
12370     case TARGET_NR_epoll_ctl:
12371     {
12372         struct epoll_event ep;
12373         struct epoll_event *epp = NULL;
12374         if (arg4) {
12375             struct target_epoll_event *target_ep;
12376             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12377                 return -TARGET_EFAULT;
12378             }
12379             ep.events = tswap32(target_ep->events);
12380             /* The epoll_data_t union is just opaque data to the kernel,
12381              * so we transfer all 64 bits across and need not worry what
12382              * actual data type it is.
12383              */
12384             ep.data.u64 = tswap64(target_ep->data.u64);
12385             unlock_user_struct(target_ep, arg4, 0);
12386             epp = &ep;
12387         }
12388         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12389     }
12390 #endif
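    /*
     * Guest-side sketch for epoll_ctl (illustrative only; the cookie value
     * is arbitrary): because the kernel treats epoll_data_t as opaque, the
     * handler above byte-swaps the whole 64-bit value no matter whether the
     * guest stored a pointer, an fd or a u64 in it, and the guest gets
     * exactly that value back from epoll_wait().
     */
#if 0
    /* needs <sys/epoll.h> */
    static void example_guest_epoll_ctl(int epfd, int sock_fd)
    {
        struct epoll_event ev;

        ev.events = EPOLLIN | EPOLLET;
        ev.data.u64 = 0x1234567890abcdefULL;    /* opaque cookie */
        epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev);
    }
#endif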
12391 
12392 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12393 #if defined(TARGET_NR_epoll_wait)
12394     case TARGET_NR_epoll_wait:
12395 #endif
12396 #if defined(TARGET_NR_epoll_pwait)
12397     case TARGET_NR_epoll_pwait:
12398 #endif
12399     {
12400         struct target_epoll_event *target_ep;
12401         struct epoll_event *ep;
12402         int epfd = arg1;
12403         int maxevents = arg3;
12404         int timeout = arg4;
12405 
12406         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12407             return -TARGET_EINVAL;
12408         }
12409 
12410         target_ep = lock_user(VERIFY_WRITE, arg2,
12411                               maxevents * sizeof(struct target_epoll_event), 1);
12412         if (!target_ep) {
12413             return -TARGET_EFAULT;
12414         }
12415 
12416         ep = g_try_new(struct epoll_event, maxevents);
12417         if (!ep) {
12418             unlock_user(target_ep, arg2, 0);
12419             return -TARGET_ENOMEM;
12420         }
12421 
12422         switch (num) {
12423 #if defined(TARGET_NR_epoll_pwait)
12424         case TARGET_NR_epoll_pwait:
12425         {
12426             target_sigset_t *target_set;
12427             sigset_t _set, *set = &_set;
12428 
12429             if (arg5) {
12430                 if (arg6 != sizeof(target_sigset_t)) {
12431                     ret = -TARGET_EINVAL;
12432                     break;
12433                 }
12434 
12435                 target_set = lock_user(VERIFY_READ, arg5,
12436                                        sizeof(target_sigset_t), 1);
12437                 if (!target_set) {
12438                     ret = -TARGET_EFAULT;
12439                     break;
12440                 }
12441                 target_to_host_sigset(set, target_set);
12442                 unlock_user(target_set, arg5, 0);
12443             } else {
12444                 set = NULL;
12445             }
12446 
12447             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12448                                              set, SIGSET_T_SIZE));
12449             break;
12450         }
12451 #endif
12452 #if defined(TARGET_NR_epoll_wait)
12453         case TARGET_NR_epoll_wait:
12454             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12455                                              NULL, 0));
12456             break;
12457 #endif
12458         default:
12459             ret = -TARGET_ENOSYS;
12460         }
12461         if (!is_error(ret)) {
12462             int i;
12463             for (i = 0; i < ret; i++) {
12464                 target_ep[i].events = tswap32(ep[i].events);
12465                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12466             }
12467             unlock_user(target_ep, arg2,
12468                         ret * sizeof(struct target_epoll_event));
12469         } else {
12470             unlock_user(target_ep, arg2, 0);
12471         }
12472         g_free(ep);
12473         return ret;
12474     }
12475 #endif
12476 #endif
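    /*
     * Guest-side sketch for epoll_pwait (illustrative only): the handler
     * above allocates a host event array, converts the optional signal mask
     * with target_to_host_sigset(), and byte-swaps each returned event into
     * the locked guest array before unlocking it.
     */
#if 0
    /* needs <sys/epoll.h> and <signal.h> */
    static void example_guest_epoll_pwait(int epfd)
    {
        struct epoll_event events[16];
        sigset_t mask;

        sigfillset(&mask);
        sigdelset(&mask, SIGINT);       /* only SIGINT may interrupt the wait */
        epoll_pwait(epfd, events, 16, 1000 /* ms */, &mask);
    }
#endif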
12477 #ifdef TARGET_NR_prlimit64
12478     case TARGET_NR_prlimit64:
12479     {
12480         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12481         struct target_rlimit64 *target_rnew, *target_rold;
12482         struct host_rlimit64 rnew, rold, *rnewp = 0;
12483         int resource = target_to_host_resource(arg2);
12484         if (arg3) {
12485             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12486                 return -TARGET_EFAULT;
12487             }
12488             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12489             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12490             unlock_user_struct(target_rnew, arg3, 0);
12491             rnewp = &rnew;
12492         }
12493 
12494         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12495         if (!is_error(ret) && arg4) {
12496             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12497                 return -TARGET_EFAULT;
12498             }
12499             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12500             target_rold->rlim_max = tswap64(rold.rlim_max);
12501             unlock_user_struct(target_rold, arg4, 1);
12502         }
12503         return ret;
12504     }
12505 #endif
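    /*
     * Guest-side sketch for prlimit64 (illustrative only; the limits are
     * arbitrary): glibc's prlimit() wrapper is built on this syscall, and
     * the handler above converts the 64-bit rlimit fields with tswap64() in
     * both directions.
     */
#if 0
    /* needs _GNU_SOURCE and <sys/resource.h> */
    static void example_guest_prlimit(pid_t pid)
    {
        struct rlimit new_limit = { .rlim_cur = 4096, .rlim_max = 8192 };
        struct rlimit old_limit;

        prlimit(pid, RLIMIT_NOFILE, &new_limit, &old_limit);
    }
#endif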
12506 #ifdef TARGET_NR_gethostname
12507     case TARGET_NR_gethostname:
12508     {
12509         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12510         if (name) {
12511             ret = get_errno(gethostname(name, arg2));
12512             unlock_user(name, arg1, arg2);
12513         } else {
12514             ret = -TARGET_EFAULT;
12515         }
12516         return ret;
12517     }
12518 #endif
12519 #ifdef TARGET_NR_atomic_cmpxchg_32
12520     case TARGET_NR_atomic_cmpxchg_32:
12521     {
12522         /* FIXME: racy; should use start_exclusive() (see sketch below) */
12523         abi_ulong mem_value;
12524         if (get_user_u32(mem_value, arg6)) {
12525             target_siginfo_t info;
12526             info.si_signo = SIGSEGV;
12527             info.si_errno = 0;
12528             info.si_code = TARGET_SEGV_MAPERR;
12529             info._sifields._sigfault._addr = arg6;
12530             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12531                          QEMU_SI_FAULT, &info);
12532             ret = 0xdeadbeef;
12533 
12534         }
12535         if (mem_value == arg2)
12536             put_user_u32(arg1, arg6);
12537         return mem_value;
12538     }
12539 #endif
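    /*
     * What the case above implements, going by its argument usage
     * (arg1 = new value, arg2 = expected old value, arg6 = guest address):
     * a 32-bit compare-and-swap that returns the old memory contents.  As
     * the FIXME notes, it is not atomic with respect to other vCPUs.
     */
#if 0
    static uint32_t example_cmpxchg32(uint32_t *mem, uint32_t expected,
                                      uint32_t newval)
    {
        uint32_t old = *mem;

        if (old == expected) {
            *mem = newval;
        }
        return old;     /* caller checks old == expected to see if it won */
    }
#endif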
12540 #ifdef TARGET_NR_atomic_barrier
12541     case TARGET_NR_atomic_barrier:
12542         /* Like the kernel implementation and QEMU's ARM barrier
12543            handling, this is treated as a no-op. */
12544         return 0;
12545 #endif
12546 
12547 #ifdef TARGET_NR_timer_create
12548     case TARGET_NR_timer_create:
12549     {
12550         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12551 
12552         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12553 
12554         int clkid = arg1;
12555         int timer_index = next_free_host_timer();
12556 
12557         if (timer_index < 0) {
12558             ret = -TARGET_EAGAIN;
12559         } else {
12560             timer_t *phtimer = g_posix_timers + timer_index;
12561 
12562             if (arg2) {
12563                 phost_sevp = &host_sevp;
12564                 ret = target_to_host_sigevent(phost_sevp, arg2);
12565                 if (ret != 0) {
12566                     return ret;
12567                 }
12568             }
12569 
12570             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12571             if (ret) {
12572                 phtimer = NULL;
12573             } else {
12574                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12575                     return -TARGET_EFAULT;
12576                 }
12577             }
12578         }
12579         return ret;
12580     }
12581 #endif
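    /*
     * Guest-side sketch for timer_create (illustrative only; clock and
     * signal choice are arbitrary): note that the value handed back to the
     * guest is not the host timer_t but TIMER_MAGIC | timer_index, which
     * the later timer_* cases decode with get_timer_id() to look up the
     * host timer in g_posix_timers[].
     */
#if 0
    /* needs <signal.h>, <string.h>, <time.h>; link with -lrt on older glibc */
    static timer_t example_guest_timer_create(void)
    {
        struct sigevent sev;
        timer_t id;

        memset(&sev, 0, sizeof(sev));
        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = SIGALRM;
        timer_create(CLOCK_MONOTONIC, &sev, &id);
        return id;
    }
#endif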
12582 
12583 #ifdef TARGET_NR_timer_settime
12584     case TARGET_NR_timer_settime:
12585     {
12586         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12587          * struct itimerspec * old_value */
12588         target_timer_t timerid = get_timer_id(arg1);
12589 
12590         if (timerid < 0) {
12591             ret = timerid;
12592         } else if (arg3 == 0) {
12593             ret = -TARGET_EINVAL;
12594         } else {
12595             timer_t htimer = g_posix_timers[timerid];
12596             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12597 
12598             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12599                 return -TARGET_EFAULT;
12600             }
12601             ret = get_errno(
12602                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12603             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12604                 return -TARGET_EFAULT;
12605             }
12606         }
12607         return ret;
12608     }
12609 #endif
12610 
12611 #ifdef TARGET_NR_timer_gettime
12612     case TARGET_NR_timer_gettime:
12613     {
12614         /* args: timer_t timerid, struct itimerspec *curr_value */
12615         target_timer_t timerid = get_timer_id(arg1);
12616 
12617         if (timerid < 0) {
12618             ret = timerid;
12619         } else if (!arg2) {
12620             ret = -TARGET_EFAULT;
12621         } else {
12622             timer_t htimer = g_posix_timers[timerid];
12623             struct itimerspec hspec;
12624             ret = get_errno(timer_gettime(htimer, &hspec));
12625 
12626             if (host_to_target_itimerspec(arg2, &hspec)) {
12627                 ret = -TARGET_EFAULT;
12628             }
12629         }
12630         return ret;
12631     }
12632 #endif
12633 
12634 #ifdef TARGET_NR_timer_getoverrun
12635     case TARGET_NR_timer_getoverrun:
12636     {
12637         /* args: timer_t timerid */
12638         target_timer_t timerid = get_timer_id(arg1);
12639 
12640         if (timerid < 0) {
12641             ret = timerid;
12642         } else {
12643             timer_t htimer = g_posix_timers[timerid];
12644             ret = get_errno(timer_getoverrun(htimer));
12645         }
12646         fd_trans_unregister(ret);
12647         return ret;
12648     }
12649 #endif
12650 
12651 #ifdef TARGET_NR_timer_delete
12652     case TARGET_NR_timer_delete:
12653     {
12654         /* args: timer_t timerid */
12655         target_timer_t timerid = get_timer_id(arg1);
12656 
12657         if (timerid < 0) {
12658             ret = timerid;
12659         } else {
12660             timer_t htimer = g_posix_timers[timerid];
12661             ret = get_errno(timer_delete(htimer));
12662             g_posix_timers[timerid] = 0;
12663         }
12664         return ret;
12665     }
12666 #endif
12667 
12668 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12669     case TARGET_NR_timerfd_create:
12670         return get_errno(timerfd_create(arg1,
12671                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12672 #endif
12673 
12674 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12675     case TARGET_NR_timerfd_gettime:
12676         {
12677             struct itimerspec its_curr;
12678 
12679             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12680 
12681             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12682                 return -TARGET_EFAULT;
12683             }
12684         }
12685         return ret;
12686 #endif
12687 
12688 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12689     case TARGET_NR_timerfd_settime:
12690         {
12691             struct itimerspec its_new, its_old, *p_new;
12692 
12693             if (arg3) {
12694                 if (target_to_host_itimerspec(&its_new, arg3)) {
12695                     return -TARGET_EFAULT;
12696                 }
12697                 p_new = &its_new;
12698             } else {
12699                 p_new = NULL;
12700             }
12701 
12702             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12703 
12704             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12705                 return -TARGET_EFAULT;
12706             }
12707         }
12708         return ret;
12709 #endif
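    /*
     * Guest-side sketch for the three timerfd cases above (illustrative
     * only; the periods are arbitrary): the creation flags go through
     * fcntl_flags_tbl and the itimerspec values are converted with
     * target_to_host_itimerspec() / host_to_target_itimerspec().
     */
#if 0
    /* needs <sys/timerfd.h>, <stdint.h> and <unistd.h> */
    static void example_guest_timerfd(void)
    {
        struct itimerspec its = {
            .it_value    = { .tv_sec = 1 },     /* first expiry in 1s */
            .it_interval = { .tv_sec = 1 },     /* then every second */
        };
        uint64_t expirations;
        int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);

        timerfd_settime(tfd, 0, &its, NULL);
        read(tfd, &expirations, sizeof(expirations));  /* 8-byte expiry count */
        close(tfd);
    }
#endif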
12710 
12711 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12712     case TARGET_NR_ioprio_get:
12713         return get_errno(ioprio_get(arg1, arg2));
12714 #endif
12715 
12716 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12717     case TARGET_NR_ioprio_set:
12718         return get_errno(ioprio_set(arg1, arg2, arg3));
12719 #endif
12720 
12721 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12722     case TARGET_NR_setns:
12723         return get_errno(setns(arg1, arg2));
12724 #endif
12725 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12726     case TARGET_NR_unshare:
12727         return get_errno(unshare(arg1));
12728 #endif
12729 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12730     case TARGET_NR_kcmp:
12731         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12732 #endif
12733 #ifdef TARGET_NR_swapcontext
12734     case TARGET_NR_swapcontext:
12735         /* PowerPC specific.  */
12736         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12737 #endif
12738 
12739     default:
12740     unimplemented:
12741         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12742         return -TARGET_ENOSYS;
12743     }
12744 fail:
12745     return ret;
12746 }
12747 
12748 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12749                     abi_long arg2, abi_long arg3, abi_long arg4,
12750                     abi_long arg5, abi_long arg6, abi_long arg7,
12751                     abi_long arg8)
12752 {
12753     CPUState *cpu = ENV_GET_CPU(cpu_env);
12754     abi_long ret;
12755 
12756 #ifdef DEBUG_ERESTARTSYS
12757     /* Debug-only code for exercising the syscall-restart code paths
12758      * in the per-architecture cpu main loops: restart every syscall
12759      * the guest makes once before letting it through.
12760      */
12761     {
12762         static bool flag;
12763         flag = !flag;
12764         if (flag) {
12765             return -TARGET_ERESTARTSYS;
12766         }
12767     }
12768 #endif
12769 
12770     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
12771                              arg5, arg6, arg7, arg8);
12772 
12773     if (unlikely(do_strace)) {
12774         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12775         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12776                           arg5, arg6, arg7, arg8);
12777         print_syscall_ret(num, ret);
12778     } else {
12779         ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12780                           arg5, arg6, arg7, arg8);
12781     }
12782 
12783     trace_guest_user_syscall_ret(cpu, num, ret);
12784     return ret;
12785 }
12786